From 5b6d71af8bd8a361c72e9b6d54e6db8117e7617d Mon Sep 17 00:00:00 2001
From: rich7420 <101171023+rich7420@users.noreply.github.com>
Date: Thu, 4 Jul 2024 10:04:45 +0800
Subject: [PATCH 01/12] [#4007] improvement: Use template to reduce Privileges
 duplicate codes (#4010)

### What changes were proposed in this pull request?

- Introduce a template class (`GenericPrivilege`) to remove the duplicated code.

### Why are the changes needed?

- Currently [Privileges.java](https://github.com/datastrato/gravitino/blob/main/api/src/main/java/com/datastrato/gravitino/authorization/Privileges.java#L266) contains more than a thousand lines of duplicated code for instantiating CreateCatalog, AlterCatalog, ...
- Using a template makes Privileges.java much smaller.

Fix: #4007

### Does this PR introduce _any_ user-facing change?

N/A

### How was this patch tested?

`./gradlew test`

---------

Co-authored-by: user
---
 .../gravitino/authorization/Privileges.java   | 1276 ++++-------------
 1 file changed, 293 insertions(+), 983 deletions(-)

diff --git a/api/src/main/java/com/datastrato/gravitino/authorization/Privileges.java b/api/src/main/java/com/datastrato/gravitino/authorization/Privileges.java
index 24df6352110..5fa717cad5f 100644
--- a/api/src/main/java/com/datastrato/gravitino/authorization/Privileges.java
+++ b/api/src/main/java/com/datastrato/gravitino/authorization/Privileges.java
@@ -18,40 +18,7 @@
  */
 package com.datastrato.gravitino.authorization;
 
-import static com.datastrato.gravitino.authorization.Privilege.Name.ADD_GROUP;
-import static com.datastrato.gravitino.authorization.Privilege.Name.ADD_USER;
-import static com.datastrato.gravitino.authorization.Privilege.Name.ALTER_CATALOG;
-import static com.datastrato.gravitino.authorization.Privilege.Name.ALTER_SCHEMA;
-import static com.datastrato.gravitino.authorization.Privilege.Name.CREATE_CATALOG;
-import static com.datastrato.gravitino.authorization.Privilege.Name.CREATE_FILESET;
-import static com.datastrato.gravitino.authorization.Privilege.Name.CREATE_METALAKE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.CREATE_ROLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.CREATE_SCHEMA;
-import static com.datastrato.gravitino.authorization.Privilege.Name.CREATE_TABLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.CREATE_TOPIC;
-import static com.datastrato.gravitino.authorization.Privilege.Name.DELETE_ROLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.DROP_CATALOG;
-import static com.datastrato.gravitino.authorization.Privilege.Name.DROP_FILESET;
-import static com.datastrato.gravitino.authorization.Privilege.Name.DROP_SCHEMA;
-import static com.datastrato.gravitino.authorization.Privilege.Name.DROP_TABLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.DROP_TOPIC;
-import static com.datastrato.gravitino.authorization.Privilege.Name.GET_GROUP;
-import static com.datastrato.gravitino.authorization.Privilege.Name.GET_ROLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.GET_USER;
-import static com.datastrato.gravitino.authorization.Privilege.Name.GRANT_ROLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.MANAGE_METALAKE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.READ_FILESET;
-import static com.datastrato.gravitino.authorization.Privilege.Name.READ_TABLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.READ_TOPIC;
-import static com.datastrato.gravitino.authorization.Privilege.Name.REMOVE_GROUP;
-import static com.datastrato.gravitino.authorization.Privilege.Name.REMOVE_USER;
-import static com.datastrato.gravitino.authorization.Privilege.Name.REVOKE_ROLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.USE_CATALOG;
-import static com.datastrato.gravitino.authorization.Privilege.Name.USE_METALAKE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.USE_SCHEMA;
-import static com.datastrato.gravitino.authorization.Privilege.Name.WRITE_FILESET;
-import static com.datastrato.gravitino.authorization.Privilege.Name.WRITE_TABLE;
-import static com.datastrato.gravitino.authorization.Privilege.Name.WRITE_TOPIC;
+import java.util.Objects;
 
 /** The helper class for {@link Privilege}. */
 public class Privileges {
@@ -276,26 +243,84 @@ public static Privilege deny(Privilege.Name name) {
     }
   }
 
-  /** The privilege to create a catalog. */
-  public abstract static class CreateCatalog implements Privilege {
+  /**
+   * Abstract class representing a generic privilege.
+   *
+   * @param <T> the type of the privilege
+   */
+  public abstract static class GenericPrivilege<T extends GenericPrivilege<T>>
+      implements Privilege {
+
+    /**
+     * Functional interface for creating instances of GenericPrivilege.
+     *
+     * @param <T> the type of the privilege
+     */
+    @FunctionalInterface
+    public interface GenericPrivilegeFactory<T extends GenericPrivilege<T>> {
+      /**
+       * Creates a new instance of the privilege.
+       *
+       * @param condition the condition of the privilege
+       * @param name the name of the privilege
+       * @return the created privilege instance
+       */
+      T create(Condition condition, Name name);
+    }
+
+    private final Condition condition;
+    private final Name name;
+
+    /**
+     * Constructor for GenericPrivilege.
+     *
+     * @param condition the condition of the privilege
+     * @param name the name of the privilege
+     */
+    protected GenericPrivilege(Condition condition, Name name) {
+      this.condition = condition;
+      this.name = name;
+    }
 
-    private static final CreateCatalog ALLOW_INSTANCE =
-        new CreateCatalog() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
+    @Override
+    public Name name() {
+      return name;
+    }
+
+    @Override
+    public Condition condition() {
+      return condition;
+    }
+
+    @Override
+    public String simpleString() {
+      return condition.name() + " " + name.name().toLowerCase().replace('_', ' ');
+    }
 
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (!(o instanceof GenericPrivilege)) return false;
+      GenericPrivilege<?> that = (GenericPrivilege<?>) o;
+      return condition == that.condition && name == that.name;
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(condition, name);
+    }
+  }
+
+  /** The privilege to create a catalog. */
+  public static class CreateCatalog extends GenericPrivilege<CreateCatalog> {
+    private static final CreateCatalog ALLOW_INSTANCE =
+        new CreateCatalog(Condition.ALLOW, Name.CREATE_CATALOG);
     private static final CreateCatalog DENY_INSTANCE =
-        new CreateCatalog() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new CreateCatalog(Condition.DENY, Name.CREATE_CATALOG);
 
-    private CreateCatalog() {}
+    private CreateCatalog(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static CreateCatalog allow() {
@@ -306,40 +331,18 @@ public static CreateCatalog allow() {
     public static CreateCatalog deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return CREATE_CATALOG;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " create catalog";
-    }
   }
 
   /** The privilege to alter a catalog. */
-  public abstract static class AlterCatalog implements Privilege {
-
+  public static class AlterCatalog extends GenericPrivilege<AlterCatalog> {
     private static final AlterCatalog ALLOW_INSTANCE =
-        new AlterCatalog() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new AlterCatalog(Condition.ALLOW, Name.ALTER_CATALOG);
     private static final AlterCatalog DENY_INSTANCE =
-        new AlterCatalog() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new AlterCatalog(Condition.DENY, Name.ALTER_CATALOG);
 
-    private AlterCatalog() {}
+    private AlterCatalog(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static AlterCatalog allow() {
@@ -350,40 +353,18 @@ public static AlterCatalog allow() {
     public static AlterCatalog deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return ALTER_CATALOG;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " alter catalog";
-    }
   }
 
   /** The privilege to drop a catalog. */
-  public abstract static class DropCatalog implements Privilege {
-
+  public static class DropCatalog extends GenericPrivilege<DropCatalog> {
     private static final DropCatalog ALLOW_INSTANCE =
-        new DropCatalog() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new DropCatalog(Condition.ALLOW, Name.DROP_CATALOG);
     private static final DropCatalog DENY_INSTANCE =
-        new DropCatalog() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new DropCatalog(Condition.DENY, Name.DROP_CATALOG);
 
-    private DropCatalog() {}
+    private DropCatalog(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static DropCatalog allow() {
@@ -394,39 +375,18 @@ public static DropCatalog allow() {
     public static DropCatalog deny() {
      return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return DROP_CATALOG;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " drop catalog";
-    }
   }
 
   /** The privilege to use a catalog. */
-  public abstract static class UseCatalog implements Privilege {
+  public static class UseCatalog extends GenericPrivilege<UseCatalog> {
     private static final UseCatalog ALLOW_INSTANCE =
-        new UseCatalog() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new UseCatalog(Condition.ALLOW, Name.USE_CATALOG);
     private static final UseCatalog DENY_INSTANCE =
-        new UseCatalog() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new UseCatalog(Condition.DENY, Name.USE_CATALOG);
 
-    private UseCatalog() {}
+    private UseCatalog(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static UseCatalog allow() {
@@ -437,40 +397,18 @@ public static UseCatalog allow() {
     public static UseCatalog deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return USE_CATALOG;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " use catalog";
-    }
   }
 
   /** The privilege to use a schema. */
-  public abstract static class UseSchema implements Privilege {
-
+  public static class UseSchema extends GenericPrivilege<UseSchema> {
     private static final UseSchema ALLOW_INSTANCE =
-        new UseSchema() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new UseSchema(Condition.ALLOW, Name.USE_SCHEMA);
     private static final UseSchema DENY_INSTANCE =
-        new UseSchema() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new UseSchema(Condition.DENY, Name.USE_SCHEMA);
 
-    private UseSchema() {}
+    private UseSchema(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static UseSchema allow() {
@@ -481,40 +419,18 @@ public static UseSchema allow() {
     public static UseSchema deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return USE_SCHEMA;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " use schema";
-    }
   }
 
   /** The privilege to create a schema. */
-  public abstract static class CreateSchema implements Privilege {
-
+  public static class CreateSchema extends GenericPrivilege<CreateSchema> {
     private static final CreateSchema ALLOW_INSTANCE =
-        new CreateSchema() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new CreateSchema(Condition.ALLOW, Name.CREATE_SCHEMA);
     private static final CreateSchema DENY_INSTANCE =
-        new CreateSchema() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new CreateSchema(Condition.DENY, Name.CREATE_SCHEMA);
 
-    private CreateSchema() {}
+    private CreateSchema(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static CreateSchema allow() {
@@ -525,40 +441,18 @@ public static CreateSchema allow() {
     public static CreateSchema deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return CREATE_SCHEMA;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " create schema";
-    }
   }
 
   /** The privilege to alter a schema. */
-  public abstract static class AlterSchema implements Privilege {
-
+  public static class AlterSchema extends GenericPrivilege<AlterSchema> {
     private static final AlterSchema ALLOW_INSTANCE =
-        new AlterSchema() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new AlterSchema(Condition.ALLOW, Name.ALTER_SCHEMA);
     private static final AlterSchema DENY_INSTANCE =
-        new AlterSchema() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new AlterSchema(Condition.DENY, Name.ALTER_SCHEMA);
 
-    private AlterSchema() {}
+    private AlterSchema(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static AlterSchema allow() {
@@ -569,40 +463,18 @@ public static AlterSchema allow() {
     public static AlterSchema deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return ALTER_SCHEMA;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " alter schema";
-    }
   }
 
   /** The privilege to drop a schema. */
-  public abstract static class DropSchema implements Privilege {
-
+  public static class DropSchema extends GenericPrivilege<DropSchema> {
     private static final DropSchema ALLOW_INSTANCE =
-        new DropSchema() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new DropSchema(Condition.ALLOW, Name.DROP_SCHEMA);
     private static final DropSchema DENY_INSTANCE =
-        new DropSchema() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new DropSchema(Condition.DENY, Name.DROP_SCHEMA);
 
-    private DropSchema() {}
+    private DropSchema(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static DropSchema allow() {
@@ -613,40 +485,18 @@ public static DropSchema allow() {
     public static DropSchema deny() {
       return DENY_INSTANCE;
    }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return DROP_SCHEMA;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " drop schema";
-    }
   }
 
   /** The privilege to create a table. */
-  public abstract static class CreateTable implements Privilege {
-
+  public static class CreateTable extends GenericPrivilege<CreateTable> {
     private static final CreateTable ALLOW_INSTANCE =
-        new CreateTable() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new CreateTable(Condition.ALLOW, Name.CREATE_TABLE);
     private static final CreateTable DENY_INSTANCE =
-        new CreateTable() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new CreateTable(Condition.DENY, Name.CREATE_TABLE);
 
-    private CreateTable() {}
+    private CreateTable(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static CreateTable allow() {
@@ -657,38 +507,18 @@ public static CreateTable allow() {
     public static CreateTable deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return CREATE_TABLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " create table";
-    }
   }
 
   /** The privilege to drop a table. */
-  public abstract static class DropTable implements Privilege {
-
+  public static class DropTable extends GenericPrivilege<DropTable> {
     private static final DropTable ALLOW_INSTANCE =
-        new DropTable() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new DropTable(Condition.ALLOW, Name.DROP_TABLE);
     private static final DropTable DENY_INSTANCE =
-        new DropTable() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new DropTable(Condition.DENY, Name.DROP_TABLE);
+
+    private DropTable(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static DropTable allow() {
@@ -699,38 +529,18 @@ public static DropTable allow() {
     public static DropTable deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return DROP_TABLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " drop table";
-    }
   }
 
   /** The privilege to read a table. */
-  public abstract static class ReadTable implements Privilege {
-
+  public static class ReadTable extends GenericPrivilege<ReadTable> {
     private static final ReadTable ALLOW_INSTANCE =
-        new ReadTable() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new ReadTable(Condition.ALLOW, Name.READ_TABLE);
     private static final ReadTable DENY_INSTANCE =
-        new ReadTable() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new ReadTable(Condition.DENY, Name.READ_TABLE);
+
+    private ReadTable(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static ReadTable allow() {
@@ -741,38 +551,18 @@ public static ReadTable allow() {
     public static ReadTable deny() {
      return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return READ_TABLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " read table";
-    }
   }
 
   /** The privilege to write a table. */
-  public abstract static class WriteTable implements Privilege {
-
+  public static class WriteTable extends GenericPrivilege<WriteTable> {
     private static final WriteTable ALLOW_INSTANCE =
-        new WriteTable() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new WriteTable(Condition.ALLOW, Name.WRITE_TABLE);
     private static final WriteTable DENY_INSTANCE =
-        new WriteTable() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new WriteTable(Condition.DENY, Name.WRITE_TABLE);
+
+    private WriteTable(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static WriteTable allow() {
@@ -783,38 +573,18 @@ public static WriteTable allow() {
     public static WriteTable deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return WRITE_TABLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " write table";
-    }
   }
 
   /** The privilege to create a fileset. */
-  public abstract static class CreateFileset implements Privilege {
-
+  public static class CreateFileset extends GenericPrivilege<CreateFileset> {
    private static final CreateFileset ALLOW_INSTANCE =
-        new CreateFileset() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new CreateFileset(Condition.ALLOW, Name.CREATE_FILESET);
     private static final CreateFileset DENY_INSTANCE =
-        new CreateFileset() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new CreateFileset(Condition.DENY, Name.CREATE_FILESET);
+
+    private CreateFileset(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static CreateFileset allow() {
@@ -825,38 +595,18 @@ public static CreateFileset allow() {
     public static CreateFileset deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return CREATE_FILESET;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " create fileset";
-    }
   }
 
   /** The privilege to drop a fileset. */
-  public abstract static class DropFileset implements Privilege {
-
+  public static class DropFileset extends GenericPrivilege<DropFileset> {
     private static final DropFileset ALLOW_INSTANCE =
-        new DropFileset() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new DropFileset(Condition.ALLOW, Name.DROP_FILESET);
     private static final DropFileset DENY_INSTANCE =
-        new DropFileset() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new DropFileset(Condition.DENY, Name.DROP_FILESET);
+
+    private DropFileset(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static DropFileset allow() {
@@ -867,38 +617,18 @@ public static DropFileset allow() {
     public static DropFileset deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return DROP_FILESET;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " drop fileset";
-    }
   }
 
   /** The privilege to read a fileset. */
-  public abstract static class ReadFileset implements Privilege {
-
+  public static class ReadFileset extends GenericPrivilege<ReadFileset> {
     private static final ReadFileset ALLOW_INSTANCE =
-        new ReadFileset() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new ReadFileset(Condition.ALLOW, Name.READ_FILESET);
     private static final ReadFileset DENY_INSTANCE =
-        new ReadFileset() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new ReadFileset(Condition.DENY, Name.READ_FILESET);
+
+    private ReadFileset(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static ReadFileset allow() {
@@ -909,38 +639,18 @@ public static ReadFileset allow() {
     public static ReadFileset deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return READ_FILESET;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " read fileset";
-    }
   }
 
   /** The privilege to write a fileset. */
-  public abstract static class WriteFileset implements Privilege {
-
+  public static class WriteFileset extends GenericPrivilege<WriteFileset> {
     private static final WriteFileset ALLOW_INSTANCE =
-        new WriteFileset() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new WriteFileset(Condition.ALLOW, Name.WRITE_FILESET);
     private static final WriteFileset DENY_INSTANCE =
-        new WriteFileset() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new WriteFileset(Condition.DENY, Name.WRITE_FILESET);
+
+    private WriteFileset(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static WriteFileset allow() {
@@ -951,40 +661,18 @@ public static WriteFileset allow() {
     public static WriteFileset deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return WRITE_FILESET;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " write fileset";
-    }
   }
 
   /** The privilege to create a topic. */
-  public abstract static class CreateTopic implements Privilege {
-
+  public static class CreateTopic extends GenericPrivilege<CreateTopic> {
     private static final CreateTopic ALLOW_INSTANCE =
-        new CreateTopic() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new CreateTopic(Condition.ALLOW, Name.CREATE_TOPIC);
     private static final CreateTopic DENY_INSTANCE =
-        new CreateTopic() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new CreateTopic(Condition.DENY, Name.CREATE_TOPIC);
 
-    private CreateTopic() {}
+    private CreateTopic(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static CreateTopic allow() {
@@ -995,38 +683,18 @@ public static CreateTopic allow() {
     public static CreateTopic deny() {
      return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return CREATE_TOPIC;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " create topic";
-    }
   }
 
   /** The privilege to drop a topic. */
-  public abstract static class DropTopic implements Privilege {
-
+  public static class DropTopic extends GenericPrivilege<DropTopic> {
     private static final DropTopic ALLOW_INSTANCE =
-        new DropTopic() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new DropTopic(Condition.ALLOW, Name.DROP_TOPIC);
     private static final DropTopic DENY_INSTANCE =
-        new DropTopic() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new DropTopic(Condition.DENY, Name.DROP_TOPIC);
+
+    private DropTopic(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static DropTopic allow() {
@@ -1037,38 +705,18 @@ public static DropTopic allow() {
     public static DropTopic deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return DROP_TOPIC;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " drop topic";
-    }
   }
 
   /** The privilege to read a topic. */
-  public abstract static class ReadTopic implements Privilege {
-
+  public static class ReadTopic extends GenericPrivilege<ReadTopic> {
     private static final ReadTopic ALLOW_INSTANCE =
-        new ReadTopic() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new ReadTopic(Condition.ALLOW, Name.READ_TOPIC);
     private static final ReadTopic DENY_INSTANCE =
-        new ReadTopic() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new ReadTopic(Condition.DENY, Name.READ_TOPIC);
+
+    private ReadTopic(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static ReadTopic allow() {
@@ -1079,38 +727,18 @@ public static ReadTopic allow() {
     public static ReadTopic deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return READ_TOPIC;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " read topic";
-    }
   }
 
   /** The privilege to write a topic. */
-  public abstract static class WriteTopic implements Privilege {
-
+  public static class WriteTopic extends GenericPrivilege<WriteTopic> {
     private static final WriteTopic ALLOW_INSTANCE =
-        new WriteTopic() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new WriteTopic(Condition.ALLOW, Name.WRITE_TOPIC);
     private static final WriteTopic DENY_INSTANCE =
-        new WriteTopic() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new WriteTopic(Condition.DENY, Name.WRITE_TOPIC);
+
+    private WriteTopic(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static WriteTopic allow() {
@@ -1121,38 +749,18 @@ public static WriteTopic allow() {
     public static WriteTopic deny() {
      return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return WRITE_TOPIC;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " write topic";
-    }
   }
 
   /** The privilege to manage a metalake. */
-  public abstract static class ManageMetalake implements Privilege {
-
+  public static class ManageMetalake extends GenericPrivilege<ManageMetalake> {
     private static final ManageMetalake ALLOW_INSTANCE =
-        new ManageMetalake() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new ManageMetalake(Condition.ALLOW, Name.MANAGE_METALAKE);
     private static final ManageMetalake DENY_INSTANCE =
-        new ManageMetalake() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new ManageMetalake(Condition.DENY, Name.MANAGE_METALAKE);
+
+    private ManageMetalake(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static ManageMetalake allow() {
@@ -1163,38 +771,18 @@ public static ManageMetalake allow() {
     public static ManageMetalake deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return MANAGE_METALAKE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " manage metalake";
-    }
   }
 
-  /** The privilege to manage a metalake. */
-  public abstract static class CreateMetalake implements Privilege {
-
+  /** The privilege to create a metalake. */
+  public static class CreateMetalake extends GenericPrivilege<CreateMetalake> {
     private static final CreateMetalake ALLOW_INSTANCE =
-        new CreateMetalake() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new CreateMetalake(Condition.ALLOW, Name.CREATE_METALAKE);
     private static final CreateMetalake DENY_INSTANCE =
-        new CreateMetalake() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new CreateMetalake(Condition.DENY, Name.CREATE_METALAKE);
+
+    private CreateMetalake(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static CreateMetalake allow() {
@@ -1205,40 +793,18 @@ public static CreateMetalake allow() {
     public static CreateMetalake deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return CREATE_METALAKE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " create metalake";
-    }
   }
 
   /** The privilege to use a metalake. */
-  public abstract static class UseMetalake implements Privilege {
-
+  public static class UseMetalake extends GenericPrivilege<UseMetalake> {
     private static final UseMetalake ALLOW_INSTANCE =
-        new UseMetalake() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new UseMetalake(Condition.ALLOW, Name.USE_METALAKE);
     private static final UseMetalake DENY_INSTANCE =
-        new UseMetalake() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new UseMetalake(Condition.DENY, Name.USE_METALAKE);
 
-    private UseMetalake() {}
+    private UseMetalake(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static UseMetalake allow() {
@@ -1249,38 +815,16 @@ public static UseMetalake allow() {
     public static UseMetalake deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return USE_METALAKE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " use metalake";
-    }
   }
 
   /** The privilege to get a user. */
-  public abstract static class GetUser implements Privilege {
-
-    private static final GetUser ALLOW_INSTANCE =
-        new GetUser() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
-    private static final GetUser DENY_INSTANCE =
-        new GetUser() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+  public static class GetUser extends GenericPrivilege<GetUser> {
+    private static final GetUser ALLOW_INSTANCE = new GetUser(Condition.ALLOW, Name.GET_USER);
+    private static final GetUser DENY_INSTANCE = new GetUser(Condition.DENY, Name.GET_USER);
+
+    private GetUser(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static GetUser allow() {
@@ -1291,40 +835,16 @@ public static GetUser allow() {
     public static GetUser deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return GET_USER;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " get user";
-    }
   }
 
   /** The privilege to add a user. */
-  public abstract static class AddUser implements Privilege {
-
-    private static final AddUser ALLOW_INSTANCE =
-        new AddUser() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
-    private static final AddUser DENY_INSTANCE =
-        new AddUser() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
-
-    private AddUser() {}
+  public static class AddUser extends GenericPrivilege<AddUser> {
+    private static final AddUser ALLOW_INSTANCE = new AddUser(Condition.ALLOW, Name.ADD_USER);
+    private static final AddUser DENY_INSTANCE = new AddUser(Condition.DENY, Name.ADD_USER);
+
+    private AddUser(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static AddUser allow() {
@@ -1335,38 +855,18 @@ public static AddUser allow() {
     public static AddUser deny() {
      return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return ADD_USER;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " add user";
-    }
   }
 
   /** The privilege to remove a user. */
-  public abstract static class RemoveUser implements Privilege {
-
+  public static class RemoveUser extends GenericPrivilege<RemoveUser> {
     private static final RemoveUser ALLOW_INSTANCE =
-        new RemoveUser() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new RemoveUser(Condition.ALLOW, Name.REMOVE_USER);
     private static final RemoveUser DENY_INSTANCE =
-        new RemoveUser() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new RemoveUser(Condition.DENY, Name.REMOVE_USER);
+
+    private RemoveUser(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static RemoveUser allow() {
@@ -1377,40 +877,17 @@ public static RemoveUser allow() {
     public static RemoveUser deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return REMOVE_USER;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " remove user";
-    }
   }
 
   /** The privilege to add a group. */
-  public abstract static class AddGroup implements Privilege {
-
+  public static class AddGroup extends GenericPrivilege<AddGroup> {
     private static final AddGroup ALLOW_INSTANCE =
-        new AddGroup() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
-    private static final AddGroup DENY_INSTANCE =
-        new AddGroup() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
-
-    private AddGroup() {}
+        new AddGroup(Condition.ALLOW, Name.ADD_GROUP);
+    private static final AddGroup DENY_INSTANCE = new AddGroup(Condition.DENY, Name.ADD_GROUP);
+
+    private AddGroup(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static AddGroup allow() {
@@ -1421,40 +898,18 @@ public static AddGroup allow() {
     public static AddGroup deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return ADD_GROUP;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " add group";
-    }
   }
 
   /** The privilege to remove a group. */
-  public abstract static class RemoveGroup implements Privilege {
-
+  public static class RemoveGroup extends GenericPrivilege<RemoveGroup> {
     private static final RemoveGroup ALLOW_INSTANCE =
-        new RemoveGroup() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new RemoveGroup(Condition.ALLOW, Name.REMOVE_GROUP);
     private static final RemoveGroup DENY_INSTANCE =
-        new RemoveGroup() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new RemoveGroup(Condition.DENY, Name.REMOVE_GROUP);
 
-    private RemoveGroup() {}
+    private RemoveGroup(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static RemoveGroup allow() {
@@ -1465,40 +920,17 @@ public static RemoveGroup allow() {
     public static RemoveGroup deny() {
      return DENY_INSTANCE;
    }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return REMOVE_GROUP;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " remove group";
-    }
   }
 
   /** The privilege to get a group. */
-  public abstract static class GetGroup implements Privilege {
-
-    private static final GetGroup ALLOW_INSTANCE =
-        new GetGroup() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
-    private static final GetGroup DENY_INSTANCE =
-        new GetGroup() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
-
-    private GetGroup() {}
+  public static class GetGroup extends GenericPrivilege<GetGroup> {
+    private static final GetGroup ALLOW_INSTANCE =
+        new GetGroup(Condition.ALLOW, Name.GET_GROUP);
+    private static final GetGroup DENY_INSTANCE = new GetGroup(Condition.DENY, Name.GET_GROUP);
+
+    private GetGroup(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static GetGroup allow() {
@@ -1509,38 +941,18 @@ public static GetGroup allow() {
     public static GetGroup deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return GET_GROUP;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " get group";
-    }
   }
 
   /** The privilege to create a role. */
-  public abstract static class CreateRole implements Privilege {
-
+  public static class CreateRole extends GenericPrivilege<CreateRole> {
     private static final CreateRole ALLOW_INSTANCE =
-        new CreateRole() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new CreateRole(Condition.ALLOW, Name.CREATE_ROLE);
     private static final CreateRole DENY_INSTANCE =
-        new CreateRole() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new CreateRole(Condition.DENY, Name.CREATE_ROLE);
+
+    private CreateRole(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static CreateRole allow() {
@@ -1551,40 +963,16 @@ public static CreateRole allow() {
     public static CreateRole deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return CREATE_ROLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " create role";
-    }
   }
 
   /** The privilege to get a role. */
-  public abstract static class GetRole implements Privilege {
-
-    private static final GetRole ALLOW_INSTANCE =
-        new GetRole() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
-    private static final GetRole DENY_INSTANCE =
-        new GetRole() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
-
-    private GetRole() {}
+  public static class GetRole extends GenericPrivilege<GetRole> {
+    private static final GetRole ALLOW_INSTANCE = new GetRole(Condition.ALLOW, Name.GET_ROLE);
+    private static final GetRole DENY_INSTANCE = new GetRole(Condition.DENY, Name.GET_ROLE);
+
+    private GetRole(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static GetRole allow() {
@@ -1595,40 +983,18 @@ public static GetRole allow() {
     public static GetRole deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return GET_ROLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " get role";
-    }
   }
 
   /** The privilege to delete a role. */
-  public abstract static class DeleteRole implements Privilege {
-
+  public static class DeleteRole extends GenericPrivilege<DeleteRole> {
     private static final DeleteRole ALLOW_INSTANCE =
-        new DeleteRole() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new DeleteRole(Condition.ALLOW, Name.DELETE_ROLE);
     private static final DeleteRole DENY_INSTANCE =
-        new DeleteRole() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new DeleteRole(Condition.DENY, Name.DELETE_ROLE);
 
-    private DeleteRole() {}
+    private DeleteRole(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static DeleteRole allow() {
@@ -1639,40 +1005,18 @@ public static DeleteRole allow() {
     public static DeleteRole deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return DELETE_ROLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " delete role";
-    }
   }
 
   /** The privilege to grant a role to the user or the group. */
-  public abstract static class GrantRole implements Privilege {
-
+  public static class GrantRole extends GenericPrivilege<GrantRole> {
     private static final GrantRole ALLOW_INSTANCE =
-        new GrantRole() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new GrantRole(Condition.ALLOW, Name.GRANT_ROLE);
     private static final GrantRole DENY_INSTANCE =
-        new GrantRole() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new GrantRole(Condition.DENY, Name.GRANT_ROLE);
 
-    private GrantRole() {}
+    private GrantRole(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static GrantRole allow() {
@@ -1683,40 +1027,18 @@ public static GrantRole allow() {
     public static GrantRole deny() {
      return DENY_INSTANCE;
    }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return GRANT_ROLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " grant role";
-    }
   }
 
   /** The privilege to revoke a role from the user or the group. */
-  public abstract static class RevokeRole implements Privilege {
-
+  public static class RevokeRole extends GenericPrivilege<RevokeRole> {
     private static final RevokeRole ALLOW_INSTANCE =
-        new RevokeRole() {
-          @Override
-          public Condition condition() {
-            return Condition.ALLOW;
-          }
-        };
-
+        new RevokeRole(Condition.ALLOW, Name.REVOKE_ROLE);
     private static final RevokeRole DENY_INSTANCE =
-        new RevokeRole() {
-          @Override
-          public Condition condition() {
-            return Condition.DENY;
-          }
-        };
+        new RevokeRole(Condition.DENY, Name.REVOKE_ROLE);
 
-    private RevokeRole() {}
+    private RevokeRole(Condition condition, Name name) {
+      super(condition, name);
+    }
 
     /** @return The instance with allow condition of the privilege. */
     public static RevokeRole allow() {
@@ -1727,17 +1049,5 @@ public static RevokeRole allow() {
     public static RevokeRole deny() {
       return DENY_INSTANCE;
     }
-
-    /** @return The generic name of the privilege. */
-    @Override
-    public Name name() {
-      return REVOKE_ROLE;
-    }
-
-    /** @return A readable string representation for the privilege. */
-    @Override
-    public String simpleString() {
-      return condition().name() + " revoke role";
-    }
   }
 }

From 828658162f8229b7e72dc8c430eed679274af217 Mon Sep 17 00:00:00 2001
From: roryqi
Date: Thu, 4 Jul 2024 11:44:14 +0800
Subject: [PATCH 02/12] [#4066] improvement(build): Add dependabots and
 protected_tags (#4067)

### What changes were proposed in this pull request?

Add dependabots and protected_tags.

### Why are the changes needed?

Fix: #4066

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Verified after merging.
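For reviewers unfamiliar with `.asf.yaml`, a rough sketch of what the added keys mean (the comments are illustrative annotations based on common ASF infra conventions, not part of the patch itself):

```yaml
dependabot_alerts: true    # surface Dependabot vulnerability alerts for the repository
dependabot_updates: false  # but do not open automated dependency-update pull requests
protected_tags:
  - "v*.*.*"               # protect release tags such as v0.5.1 from deletion or overwrite
```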
---
 .asf.yaml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.asf.yaml b/.asf.yaml
index 4dd25e0c0fa..84a019b42f6 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -48,6 +48,10 @@ github:
     required_pull_request_reviews:
       dismiss_stale_reviews: true
       required_approving_review_count: 1
+  dependabot_alerts: true
+  dependabot_updates: false
+  protected_tags:
+    - "v*.*.*"
 
 notifications:
   commits: commits@gravitino.apache.org

From 8aeb41a207ab5516cdaf524a42a1e03eeea45107 Mon Sep 17 00:00:00 2001
From: Liang Chun
Date: Thu, 4 Jul 2024 12:13:53 +0800
Subject: [PATCH 03/12] [#1061] Improvement(server): Bad request may not
 respond with JSON content (#3879)

### Why are the changes needed?

Add three ExceptionMappers to handle exceptions arising from JSON deserialization.

Fix: #1061

### Does this PR introduce _any_ user-facing change?

No, but when a user sends a malformed JSON request, the response will now be in JSON format with the call stack shown, which makes debugging easier.

### How was this patch tested?

Via the corresponding unit tests, or by manually building and starting the server, then sending a malformed request, for example:

```
curl -X POST -H "Accept: application/vnd.gravitino.v1+json" \
-H "Content-Type: application/json" -d '{"names":"metalake","comment":"comment","properties":{}}' \
http://localhost:8090/api/metalakes
```
---
 .../gravitino/server/GravitinoServer.java     |  6 ++
 .../mapper/JsonMappingExceptionMapper.java    | 39 ++++++++++++
 .../web/mapper/JsonParseExceptionMapper.java  | 38 ++++++++++++
 .../mapper/JsonProcessingExceptionMapper.java | 38 ++++++++++++
 .../TestJsonMappingExceptionMapper.java       | 45 ++++++++++++++
 .../mapper/TestJsonParseExceptionMapper.java  | 40 ++++++++++++
 .../TestJsonProcessingExceptionMapper.java    | 44 +++++++++++++
 .../web/rest/TestMetalakeOperations.java      | 62 ++++++++++++++++++-
 8 files changed, 311 insertions(+), 1 deletion(-)
 create mode 100644 server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonMappingExceptionMapper.java
 create mode 100644 server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonParseExceptionMapper.java
 create mode 100644 server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonProcessingExceptionMapper.java
 create mode 100644 server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonMappingExceptionMapper.java
 create mode 100644 server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonParseExceptionMapper.java
 create mode 100644 server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonProcessingExceptionMapper.java

diff --git a/server/src/main/java/com/datastrato/gravitino/server/GravitinoServer.java b/server/src/main/java/com/datastrato/gravitino/server/GravitinoServer.java
index ca7b9c36ba3..e21d71680a5 100644
--- a/server/src/main/java/com/datastrato/gravitino/server/GravitinoServer.java
+++ b/server/src/main/java/com/datastrato/gravitino/server/GravitinoServer.java
@@ -37,6 +37,9 @@
 import com.datastrato.gravitino.server.web.ObjectMapperProvider;
 import com.datastrato.gravitino.server.web.VersioningFilter;
 import com.datastrato.gravitino.server.web.filter.AccessControlNotAllowedFilter;
+import com.datastrato.gravitino.server.web.mapper.JsonMappingExceptionMapper;
+import com.datastrato.gravitino.server.web.mapper.JsonParseExceptionMapper;
+import com.datastrato.gravitino.server.web.mapper.JsonProcessingExceptionMapper;
 import com.datastrato.gravitino.server.web.ui.WebUIFilter;
 import java.io.File;
 import java.util.Properties;
@@ -102,6 +105,9 @@ protected void configure() {
             bind(gravitinoEnv.topicDispatcher()).to(TopicDispatcher.class).ranked(1);
           }
         });
+    register(JsonProcessingExceptionMapper.class);
+    register(JsonParseExceptionMapper.class);
+    register(JsonMappingExceptionMapper.class);
     register(ObjectMapperProvider.class).register(JacksonFeature.class);
 
     if (!enableAuthorization) {

diff --git a/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonMappingExceptionMapper.java b/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonMappingExceptionMapper.java
new file mode 100644
index 00000000000..52e958e2886
--- /dev/null
+++ b/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonMappingExceptionMapper.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.datastrato.gravitino.server.web.mapper;
+
+import com.datastrato.gravitino.server.web.Utils;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import javax.annotation.Priority;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.ExceptionMapper;
+
+/**
+ * JsonMappingExceptionMapper is used to return a consistent response format when an illegal JSON
+ * request is sent. This class overrides the built-in JsonMappingExceptionMapper defined in
+ * Jersey.
+ */
+@Priority(1)
+public class JsonMappingExceptionMapper implements ExceptionMapper<JsonMappingException> {
+  @Override
+  public Response toResponse(JsonMappingException e) {
+    String errorMsg = "Malformed json request";
+    return Utils.illegalArguments(errorMsg, e);
+  }
+}

diff --git a/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonParseExceptionMapper.java b/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonParseExceptionMapper.java
new file mode 100644
index 00000000000..4b1f43b8415
--- /dev/null
+++ b/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonParseExceptionMapper.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.datastrato.gravitino.server.web.mapper;
+
+import com.datastrato.gravitino.server.web.Utils;
+import com.fasterxml.jackson.core.JsonParseException;
+import javax.annotation.Priority;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.ExceptionMapper;
+
+/**
+ * JsonParseExceptionMapper is used to return a consistent response format when an illegal JSON
+ * request is sent. This class overrides the built-in JsonParseExceptionMapper defined in Jersey.
+ */
+@Priority(1)
+public class JsonParseExceptionMapper implements ExceptionMapper<JsonParseException> {
+  @Override
+  public Response toResponse(JsonParseException e) {
+    String errorMsg = "Malformed json request";
+    return Utils.illegalArguments(errorMsg, e);
+  }
+}

diff --git a/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonProcessingExceptionMapper.java b/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonProcessingExceptionMapper.java
new file mode 100644
index 00000000000..85b61e1f434
--- /dev/null
+++ b/server/src/main/java/com/datastrato/gravitino/server/web/mapper/JsonProcessingExceptionMapper.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.datastrato.gravitino.server.web.mapper;
+
+import com.datastrato.gravitino.server.web.Utils;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import javax.annotation.Priority;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.ExceptionMapper;
+
+/**
+ * JsonProcessingExceptionMapper is used to return a consistent response format when a general
+ * JSON issue occurs.
+ */
+@Priority(1)
+public class JsonProcessingExceptionMapper implements ExceptionMapper<JsonProcessingException> {
+  @Override
+  public Response toResponse(JsonProcessingException e) {
+    String errorMsg = "Unexpected error occurs during JSON processing.";
+    return Utils.internalError(errorMsg, e);
+  }
+}

diff --git a/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonMappingExceptionMapper.java b/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonMappingExceptionMapper.java
new file mode 100644
index 00000000000..4540b8c67c4
--- /dev/null
+++ b/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonMappingExceptionMapper.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
diff --git a/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonMappingExceptionMapper.java b/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonMappingExceptionMapper.java new file mode 100644 index 00000000000..4540b8c67c4 --- /dev/null +++ b/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonMappingExceptionMapper.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.datastrato.gravitino.server.web.mapper; + +import com.datastrato.gravitino.dto.responses.ErrorResponse; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonMappingException; +import javax.ws.rs.core.Response; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +public class TestJsonMappingExceptionMapper { + private final JsonMappingExceptionMapper jsonMappingExceptionMapper = + new JsonMappingExceptionMapper(); + + @Test + public void testJsonMappingExceptionMapper() { + JsonParser mockParser = Mockito.mock(JsonParser.class); + Response response = + jsonMappingExceptionMapper.toResponse(JsonMappingException.from(mockParser, "")); + ErrorResponse errorResponse = ErrorResponse.illegalArguments("Malformed json request"); + ErrorResponse entity = (ErrorResponse) response.getEntity(); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + Assertions.assertEquals(errorResponse.getCode(), entity.getCode()); + Assertions.assertEquals(errorResponse.getMessage(), entity.getMessage()); + Assertions.assertEquals(errorResponse.getType(), entity.getType()); + } +}
diff --git a/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonParseExceptionMapper.java b/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonParseExceptionMapper.java new file mode 100644 index 00000000000..ed4ea3a0886 --- /dev/null +++ b/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonParseExceptionMapper.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package com.datastrato.gravitino.server.web.mapper; + +import com.datastrato.gravitino.dto.responses.ErrorResponse; +import com.fasterxml.jackson.core.JsonParseException; +import javax.ws.rs.core.Response; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class TestJsonParseExceptionMapper { + private final JsonParseExceptionMapper jsonParseExceptionMapper = new JsonParseExceptionMapper(); + + @Test + public void testJsonParseExceptionMapper() { + Response response = jsonParseExceptionMapper.toResponse(new JsonParseException("")); + ErrorResponse errorResponse = ErrorResponse.illegalArguments("Malformed json request"); + ErrorResponse entity = (ErrorResponse) response.getEntity(); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + Assertions.assertEquals(errorResponse.getCode(), entity.getCode()); + Assertions.assertEquals(errorResponse.getMessage(), entity.getMessage()); + Assertions.assertEquals(errorResponse.getType(), entity.getType()); + } +} diff --git a/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonProcessingExceptionMapper.java b/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonProcessingExceptionMapper.java new file mode 100644 index 00000000000..09d991f3bb5 --- /dev/null +++ b/server/src/test/java/com/datastrato/gravitino/server/web/mapper/TestJsonProcessingExceptionMapper.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.datastrato.gravitino.server.web.mapper; + +import com.datastrato.gravitino.dto.responses.ErrorResponse; +import com.fasterxml.jackson.core.JsonProcessingException; +import javax.ws.rs.core.Response; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class TestJsonProcessingExceptionMapper { + private final JsonProcessingExceptionMapper jsonProcessingExceptionMapper = + new JsonProcessingExceptionMapper(); + + @Test + public void testJsonProcessingExceptionMapper() { + Response response = + jsonProcessingExceptionMapper.toResponse(new JsonProcessingException("") {}); + ErrorResponse errorResponse = + ErrorResponse.internalError("Unexpected error occurs when json processing."); + ErrorResponse entity = (ErrorResponse) response.getEntity(); + Assertions.assertEquals( + Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus()); + Assertions.assertEquals(errorResponse.getCode(), entity.getCode()); + Assertions.assertEquals(errorResponse.getMessage(), entity.getMessage()); + Assertions.assertEquals(errorResponse.getType(), entity.getType()); + } +} diff --git a/server/src/test/java/com/datastrato/gravitino/server/web/rest/TestMetalakeOperations.java b/server/src/test/java/com/datastrato/gravitino/server/web/rest/TestMetalakeOperations.java index 29db5b2efcb..f2cf473fe44 100644 --- a/server/src/test/java/com/datastrato/gravitino/server/web/rest/TestMetalakeOperations.java +++ b/server/src/test/java/com/datastrato/gravitino/server/web/rest/TestMetalakeOperations.java @@ -46,12 +46,21 @@ import com.datastrato.gravitino.metalake.MetalakeDispatcher; import com.datastrato.gravitino.metalake.MetalakeManager; import com.datastrato.gravitino.rest.RESTUtils; +import com.datastrato.gravitino.server.web.mapper.JsonMappingExceptionMapper; +import com.datastrato.gravitino.server.web.mapper.JsonParseExceptionMapper; +import com.datastrato.gravitino.server.web.mapper.JsonProcessingExceptionMapper; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonMappingException; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import java.io.IOException; import java.time.Instant; import java.util.List; import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.GET; +import javax.ws.rs.Path; import javax.ws.rs.client.Entity; import javax.ws.rs.core.Application; import javax.ws.rs.core.MediaType; @@ -107,10 +116,35 @@ protected void configure() { bindFactory(MockServletRequestFactory.class).to(HttpServletRequest.class); } }); - + resourceConfig.register(JsonProcessingExceptionMapper.class); + resourceConfig.register(JsonParseExceptionMapper.class); + resourceConfig.register(JsonMappingExceptionMapper.class); + resourceConfig.register(TestException.class); return resourceConfig; } + @Path("/test") + public static class TestException { + @GET + @Path("/jsonProcessingException") + public Response getJsonProcessingException() throws JsonProcessingException { + throw new JsonProcessingException("Error processing JSON") {}; + } + + @GET + @Path("/jsonMappingException") + public Response getJsonMappingException() throws JsonMappingException { + JsonParser mockParser = Mockito.mock(JsonParser.class); + throw JsonMappingException.from(mockParser, "Error mapping JSON"); + } + + @GET + @Path("/jsonParseException") + public Response getJsonParseException() throws 
JsonParseException { + throw new JsonParseException("Error parsing JSON"); + } } + @Test public void testListMetalakes() { String metalakeName = "test"; @@ -391,4 +425,30 @@ public void testDropMetalake() { Assertions.assertTrue( errorResponse.getMessage().contains("Failed to operate object [test] operation [DROP]")); } + + @Test + public void testExceptionMapper() { + Response resp = target("/test/jsonProcessingException").request().get(); + Assertions.assertEquals( + Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), resp.getStatus()); + ErrorResponse errorResp = resp.readEntity(ErrorResponse.class); + Assertions.assertEquals(ErrorConstants.INTERNAL_ERROR_CODE, errorResp.getCode()); + Assertions.assertEquals(RuntimeException.class.getSimpleName(), errorResp.getType()); + Assertions.assertTrue( + errorResp.getMessage().contains("Unexpected error occurs when json processing.")); + + Response resp1 = target("/test/jsonMappingException").request().get(); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp1.getStatus()); + ErrorResponse errorResp1 = resp1.readEntity(ErrorResponse.class); + Assertions.assertEquals(ErrorConstants.ILLEGAL_ARGUMENTS_CODE, errorResp1.getCode()); + Assertions.assertEquals(IllegalArgumentException.class.getSimpleName(), errorResp1.getType()); + Assertions.assertTrue(errorResp1.getMessage().contains("Malformed json request")); + + Response resp2 = target("/test/jsonParseException").request().get(); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp2.getStatus()); + ErrorResponse errorResp2 = resp2.readEntity(ErrorResponse.class); + Assertions.assertEquals(ErrorConstants.ILLEGAL_ARGUMENTS_CODE, errorResp2.getCode()); + Assertions.assertEquals(IllegalArgumentException.class.getSimpleName(), errorResp2.getType()); + Assertions.assertTrue(errorResp2.getMessage().contains("Malformed json request")); + } }

From fdc59f88a0a65d82e933fd315fe127e9a46a4c52 Mon Sep 17 00:00:00 2001
From: Justin Mclean
Date: Thu, 4 Jul 2024 15:58:31 +1000
Subject: [PATCH 04/12] [#4064] Update to use correct ASF names (#4068)

### What changes were proposed in this pull request?

Changed ASF project names in the first instance and in titles to be in the correct form "Apache Foo". This includes changes to documentation and code comments. Fixed the case of project names where needed, e.g. trino -> Trino, gravitino -> Gravitino, iceberg -> Iceberg, etc.

### Why are the changes needed?

To use the correct name for ASF projects.

Fix: #4064

### Does this PR introduce _any_ user-facing change?

None.

### How was this patch tested?

Built and non-integration tests pass locally.
--- CONTRIBUTING.md | 4 +- ROADMAP.md | 6 +-- .../com/datastrato/gravitino/Catalog.java | 2 +- .../datastrato/gravitino/MetadataObject.java | 2 +- .../com/datastrato/gravitino/Metalake.java | 2 +- .../authorization/SecurableObject.java | 2 +- .../exceptions/GravitinoRuntimeException.java | 2 +- .../datastrato/gravitino/file/Fileset.java | 8 ++-- .../datastrato/gravitino/messaging/Topic.java | 4 +- .../distributions/Distributions.java | 2 +- .../rel/expressions/literals/Literals.java | 2 +- .../rel/expressions/sorts/SortOrders.java | 2 +- .../expressions/transforms/Transforms.java | 2 +- .../gravitino/rel/indexes/Indexes.java | 2 +- .../gravitino/rel/types/Decimal.java | 2 +- .../datastrato/gravitino/rel/types/Type.java | 2 +- .../catalog/common/ClassProvider.java | 2 +- .../catalog/property/PropertyConverter.java | 4 +- .../gravitino/catalog/hive/HiveCatalog.java | 2 +- .../catalog/hive/HiveCatalogCapability.java | 4 +- .../catalog/hive/HiveCatalogOperations.java | 6 +-- .../catalog/hive/HiveClientPool.java | 2 +- .../gravitino/catalog/hive/HiveColumn.java | 2 +- .../gravitino/catalog/hive/HiveSchema.java | 2 +- .../gravitino/catalog/hive/HiveTable.java | 2 +- .../hive/integration/test/CatalogHiveIT.java | 46 ++++++++++--------- .../integration/test/ProxyCatalogHiveIT.java | 2 +- .../hive/miniHMS/MiniHiveMetastore.java | 6 +-- .../catalog/jdbc/JdbcCatalogOperations.java | 8 ++-- .../gravitino/catalog/doris/DorisCatalog.java | 2 +- .../converter/DorisExceptionConverter.java | 2 +- .../doris/converter/DorisTypeConverter.java | 2 +- .../operation/DorisDatabaseOperations.java | 2 +- .../doris/operation/DorisTableOperations.java | 4 +- .../integration/test/CatalogDorisIT.java | 2 +- .../gravitino/catalog/mysql/MysqlCatalog.java | 2 +- .../converter/MysqlExceptionConverter.java | 2 +- .../mysql/operation/MysqlTableOperations.java | 2 +- .../integration/test/CatalogMysqlIT.java | 2 +- .../operation/PostgreSqlTableOperations.java | 4 +- .../gravitino/catalog/kafka/KafkaCatalog.java | 4 +- .../catalog/kafka/KafkaCatalogOperations.java | 2 +- .../lakehouse/iceberg/IcebergCatalog.java | 2 +- .../iceberg/IcebergCatalogOperations.java | 2 +- .../lakehouse/iceberg/IcebergColumn.java | 2 +- .../iceberg/IcebergHiveCachedClientPool.java | 10 ++-- .../lakehouse/iceberg/IcebergSchema.java | 2 +- .../lakehouse/iceberg/IcebergTable.java | 4 +- .../IcebergTablePropertiesMetadata.java | 2 +- .../kerberos/HiveBackendProxy.java | 2 +- .../iceberg/converter/ConvertUtil.java | 8 ++-- .../DescribeIcebergSortOrderVisitor.java | 2 +- .../converter/FromIcebergPartitionSpec.java | 2 +- .../converter/FromIcebergSortOrder.java | 2 +- .../iceberg/converter/FromIcebergType.java | 2 +- .../converter/ToIcebergPartitionSpec.java | 8 ++-- .../iceberg/converter/ToIcebergSortOrder.java | 2 +- .../iceberg/converter/ToIcebergType.java | 2 +- .../converter/ToIcebergTypeVisitor.java | 4 +- .../iceberg/ops/IcebergTableOpsHelper.java | 2 +- .../web/metrics/IcebergMetricsStore.java | 2 +- .../test/IcebergRESTServiceIT.java | 2 +- .../paimon/GravitinoPaimonColumn.java | 2 +- .../paimon/GravitinoPaimonTable.java | 5 +- .../lakehouse/paimon/PaimonCatalog.java | 4 +- .../paimon/PaimonCatalogBackend.java | 2 +- .../paimon/PaimonCatalogOperations.java | 2 +- .../lakehouse/paimon/PaimonSchema.java | 4 +- .../PaimonSchemaPropertiesMetadata.java | 3 +- .../paimon/PaimonTablePropertiesMetadata.java | 3 +- .../paimon/ops/PaimonCatalogOps.java | 2 +- .../client/GravitinoAdminClient.java | 4 +- .../gravitino/client/GravitinoClient.java | 4 +- 
.../gravitino/client/GravitinoMetalake.java | 6 +-- .../gravitino/client/GravitinoVersion.java | 2 +- .../hadoop/GravitinoVirtualFileSystem.java | 4 +- .../java/com/datastrato/gravitino/Entity.java | 2 +- .../EntityAlreadyExistsException.java | 2 +- .../gravitino/EntitySerDeFactory.java | 2 +- .../gravitino/EntityStoreFactory.java | 2 +- .../java/com/datastrato/gravitino/Field.java | 2 +- .../datastrato/gravitino/GravitinoEnv.java | 2 +- .../authorization/AccessControlManager.java | 2 +- .../connector/DataTypeConverter.java | 6 +-- .../gravitino/meta/SchemaEntity.java | 2 +- .../gravitino/meta/TableEntity.java | 2 +- .../gravitino/meta/TopicEntity.java | 2 +- .../datastrato/gravitino/meta/UserEntity.java | 2 +- .../gravitino/metalake/MetalakeManager.java | 2 +- .../converters/H2ExceptionConverter.java | 4 +- .../converters/MySQLExceptionConverter.java | 4 +- .../converters/SQLExceptionConverter.java | 2 +- dev/docker/tools/README.md | 2 +- docs/apache-hive-catalog.md | 2 +- docs/docker-image-details.md | 16 +++---- docs/expression.md | 4 +- docs/getting-started.md | 14 +++--- docs/glossary.md | 14 +++--- docs/gravitino-server-config.md | 10 ++-- docs/hadoop-catalog.md | 2 +- docs/how-to-build.md | 8 ++-- docs/how-to-install.md | 24 +++++----- docs/how-to-sign-releases.md | 2 +- docs/how-to-test.md | 10 ++-- docs/how-to-use-gvfs.md | 6 +-- docs/how-to-use-python-client.md | 10 ++-- docs/how-to-use-relational-backend-storage.md | 4 +- docs/how-to-use-the-playground.md | 8 ++-- docs/iceberg-rest-service.md | 14 +++--- docs/index.md | 10 ++-- docs/jdbc-doris-catalog.md | 2 +- docs/jdbc-mysql-catalog.md | 2 +- docs/jdbc-postgresql-catalog.md | 2 +- docs/lakehouse-iceberg-catalog.md | 2 +- ...manage-fileset-metadata-using-gravitino.md | 2 +- ...nage-messaging-metadata-using-gravitino.md | 4 +- docs/manage-metalake-using-gravitino.md | 4 +- ...age-relational-metadata-using-gravitino.md | 6 +-- .../manage-table-partition-using-gravitino.md | 4 +- docs/metrics.md | 4 +- docs/overview.md | 4 +- docs/publish-docker-images.md | 4 +- docs/security.md | 8 ++-- docs/spark-connector/spark-catalog-hive.md | 2 +- docs/spark-connector/spark-catalog-iceberg.md | 4 +- docs/spark-connector/spark-connector.md | 4 +- docs/trino-connector/catalog-hive.md | 4 +- docs/trino-connector/catalog-iceberg.md | 4 +- docs/trino-connector/catalog-mysql.md | 4 +- docs/trino-connector/catalog-postgresql.md | 4 +- docs/trino-connector/configuration.md | 2 +- docs/trino-connector/development.md | 4 +- docs/trino-connector/index.md | 4 +- docs/trino-connector/installation.md | 12 ++--- docs/trino-connector/requirements.md | 4 +- docs/trino-connector/sql-support.md | 4 +- docs/trino-connector/supported-catalog.md | 6 +-- docs/trino-connector/trino-connector.md | 4 +- docs/webui.md | 4 +- .../flink/connector/PropertiesConverter.java | 2 +- .../catalog/GravitinoCatalogManager.java | 2 +- .../store/GravitinoCatalogStore.java | 2 +- .../test/hive/FlinkHiveCatalogIT.java | 2 +- .../integration/test/MiniGravitino.java | 6 +-- .../test/container/HiveContainer.java | 4 +- .../test/trino/TrinoConnectorIT.java | 14 +++--- .../test/trino/TrinoQueryITBase.java | 4 +- rfc/rfc-1/rfc-1.md | 2 +- rfc/rfc-3/Transaction-implementation-on-kv.md | 6 +-- .../filter/AccessControlNotAllowedFilter.java | 2 +- .../gravitino/server/web/ui/WebUIFilter.java | 2 +- .../spark/connector/PropertiesConverter.java | 2 +- .../spark/connector/SparkTypeConverter.java | 2 +- .../spark/connector/catalog/BaseCatalog.java | 2 +- .../catalog/GravitinoCatalogManager.java | 
2 +- .../hive/HivePropertiesConverter.java | 6 +-- .../iceberg/GravitinoIcebergCatalog.java | 6 +-- .../iceberg/IcebergPropertiesConverter.java | 2 +- .../connector/iceberg/SparkIcebergTable.java | 2 +- .../plugin/GravitinoDriverPlugin.java | 4 +- .../plugin/GravitinoSparkPlugin.java | 2 +- .../utils/GravitinoTableInfoHelper.java | 2 +- .../integration/test/SparkCommonIT.java | 2 +- .../integration/test/SparkEnvIT.java | 2 +- .../SparkIcebergCatalogHiveBackendIT.java | 2 +- .../SparkIcebergCatalogRestBackendIT.java | 4 +- .../trino/connector/GravitinoConfig.java | 2 +- .../trino/connector/GravitinoConnector.java | 4 +- .../connector/GravitinoConnectorFactory.java | 6 +-- .../GravitinoDataSourceProvider.java | 2 +- .../trino/connector/GravitinoMetadata.java | 6 +-- .../connector/GravitinoPageSinkProvider.java | 2 +- .../connector/GravitinoRecordSetProvider.java | 2 +- .../trino/connector/GravitinoSplit.java | 4 +- .../trino/connector/GravitinoSplitSource.java | 4 +- .../connector/GravitinoTransactionHandle.java | 4 +- .../catalog/CatalogConnectorContext.java | 8 ++-- .../catalog/CatalogConnectorManager.java | 2 +- .../catalog/CatalogConnectorMetadata.java | 2 +- .../CatalogConnectorMetadataAdapter.java | 26 +++++------ .../connector/catalog/CatalogRegister.java | 6 +-- .../hive/HiveCatalogPropertyConverter.java | 2 +- .../catalog/hive/HiveConnectorAdapter.java | 4 +- .../catalog/hive/HiveDataTypeTransformer.java | 2 +- .../catalog/hive/HiveMetadataAdapter.java | 2 +- .../catalog/hive/HivePropertyMeta.java | 4 +- .../iceberg/IcebergConnectorAdapter.java | 5 +- .../iceberg/IcebergDataTypeTransformer.java | 2 +- .../iceberg/IcebergMetadataAdapter.java | 2 +- .../jdbc/mysql/MySQLConnectorAdapter.java | 2 +- .../jdbc/mysql/MySQLMetadataAdapter.java | 4 +- .../PostgreSQLConnectorAdapter.java | 4 +- .../postgresql/PostgreSQLMetadataAdapter.java | 2 +- .../memory/MemoryConnectorAdapter.java | 4 +- .../connector/metadata/GravitinoCatalog.java | 2 +- .../connector/metadata/GravitinoSchema.java | 2 +- .../connector/metadata/GravitinoTable.java | 2 +- .../system/GravitinoSystemConnector.java | 6 +-- .../GravitinoSystemConnectorMetadata.java | 2 +- .../GravitinoStoredProcedureFactory.java | 2 +- .../util/GeneralDataTypeTransformer.java | 2 +- web/README.md | 4 +- 202 files changed, 423 insertions(+), 403 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index be61fb11846..028b782bec9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,9 +17,9 @@ under the License. --> -# Contributing to Gravitino +# Contributing to Apache Gravitino -Thank you for your interest in contributing to Gravitino! You are welcome to contribute in any way you can to enhance the project. Gravitino appreciates your assistance in making it better, whether through code contributions, documentation, tests, best practices, graphic design, or any other means that have a positive impact. +Thank you for your interest in contributing to Apache Gravitino! You are welcome to contribute in any way you can to enhance the project. Gravitino appreciates your assistance in making it better, whether through code contributions, documentation, tests, best practices, graphic design, or any other means that have a positive impact. Before you get started, please read and follow these guidelines to ensure a smooth and productive collaboration. 
diff --git a/ROADMAP.md b/ROADMAP.md index a29fa84178f..f2bb88207d4 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,8 +1,8 @@ -# Gravitino Roadmap +# Apache Gravitino Roadmap ## 2024 Roadmap -As of March 2024, this is the current roadmap for the Gravitino project. Please note that Gravitino is an open-source project and relies on the collective efforts of both community contributors and paid developers to shape its future features and direction. While we strive to keep this roadmap up-to-date, it is best seen as a general guide for future developments rather than an exhaustive list of planned features. The Gravitino community may decide to alter the project's direction or prioritize other features that are not listed here. +As of March 2024, this is the current roadmap for the Apache Gravitino project. Please note that Gravitino is an open-source project and relies on the collective efforts of both community contributors and paid developers to shape its future features and direction. While we strive to keep this roadmap up-to-date, it is best seen as a general guide for future developments rather than an exhaustive list of planned features. The Gravitino community may decide to alter the project's direction or prioritize other features that are not listed here. ## First half of 2024 (January-June) - Make Gravitino ready for production environment @@ -20,7 +20,7 @@ As of March 2024, this is the current roadmap for the Gravitino project. Please - Implement support for a basic data compliance framework. - Continue to encourage and support community work. -## Second half of 2024 (July-December) - Improve Gravitino features +## Second half of 2024 (July-December) - Improve Apache Gravitino features ### Q3 (July-September) diff --git a/api/src/main/java/com/datastrato/gravitino/Catalog.java b/api/src/main/java/com/datastrato/gravitino/Catalog.java index 8c2d47efec0..2f75cab38ce 100644 --- a/api/src/main/java/com/datastrato/gravitino/Catalog.java +++ b/api/src/main/java/com/datastrato/gravitino/Catalog.java @@ -25,7 +25,7 @@ import java.util.Map; /** - * The interface of a catalog. The catalog is the second level entity in the gravitino system, + * The interface of a catalog. The catalog is the second level entity in the Gravitino system, * containing a set of tables. The server side should use the other one with the same name in the * core module. */ diff --git a/api/src/main/java/com/datastrato/gravitino/MetadataObject.java b/api/src/main/java/com/datastrato/gravitino/MetadataObject.java index f9011d50cb7..3d250b83846 100644 --- a/api/src/main/java/com/datastrato/gravitino/MetadataObject.java +++ b/api/src/main/java/com/datastrato/gravitino/MetadataObject.java @@ -24,7 +24,7 @@ /** * The MetadataObject is the basic unit of the Gravitino system. It represents the metadata object - * in the Gravitino system. The object can be a metalake, catalog, schema, table, topic, etc. + * in the Apache Gravitino system. The object can be a metalake, catalog, schema, table, topic, etc. */ @Unstable public interface MetadataObject { diff --git a/api/src/main/java/com/datastrato/gravitino/Metalake.java b/api/src/main/java/com/datastrato/gravitino/Metalake.java index a7059761718..60ae2ed5d40 100644 --- a/api/src/main/java/com/datastrato/gravitino/Metalake.java +++ b/api/src/main/java/com/datastrato/gravitino/Metalake.java @@ -22,7 +22,7 @@ import java.util.Map; /** - * The interface of a metalake. The metalake is the top level entity in the gravitino system, + * The interface of a metalake. 
The metalake is the top level entity in the Apache Gravitino system, * containing a set of catalogs. */ @Evolving diff --git a/api/src/main/java/com/datastrato/gravitino/authorization/SecurableObject.java b/api/src/main/java/com/datastrato/gravitino/authorization/SecurableObject.java index 882faed29f6..3f27c6ff902 100644 --- a/api/src/main/java/com/datastrato/gravitino/authorization/SecurableObject.java +++ b/api/src/main/java/com/datastrato/gravitino/authorization/SecurableObject.java @@ -24,7 +24,7 @@ /** * The securable object is the entity which access can be granted. Unless allowed by a grant, access - * is denied. Gravitino organizes the securable objects using tree structure.
+ * is denied. Apache Gravitino organizes the securable objects using tree structure.
* There are three fields in the securable object: parent, name, and type.
* The types include 6 kinds: CATALOG, SCHEMA, TABLE, FILESET, TOPIC and METALAKE.
* You can use the helper class `SecurableObjects` to create the securable object which you need. diff --git a/api/src/main/java/com/datastrato/gravitino/exceptions/GravitinoRuntimeException.java b/api/src/main/java/com/datastrato/gravitino/exceptions/GravitinoRuntimeException.java index 3c2ad104442..540592a1553 100644 --- a/api/src/main/java/com/datastrato/gravitino/exceptions/GravitinoRuntimeException.java +++ b/api/src/main/java/com/datastrato/gravitino/exceptions/GravitinoRuntimeException.java @@ -21,7 +21,7 @@ import com.google.errorprone.annotations.FormatMethod; import com.google.errorprone.annotations.FormatString; -/** Base class for all Gravitino runtime exceptions. */ +/** Base class for all Apache Gravitino runtime exceptions. */ public class GravitinoRuntimeException extends RuntimeException { /** diff --git a/api/src/main/java/com/datastrato/gravitino/file/Fileset.java b/api/src/main/java/com/datastrato/gravitino/file/Fileset.java index 57e7aa1cf13..40bef34ea03 100644 --- a/api/src/main/java/com/datastrato/gravitino/file/Fileset.java +++ b/api/src/main/java/com/datastrato/gravitino/file/Fileset.java @@ -27,10 +27,10 @@ /** * An interface representing a fileset under a schema {@link Namespace}. A fileset is a virtual - * concept of the file or directory that is managed by Gravitino. Users can create a fileset object - * to manage the non-tabular data on the FS-like storage. The typical use case is to manage the - * training data for AI workloads. The major difference compare to the relational table is that the - * fileset is schema-free, the main property of the fileset is the storage location of the + * concept of the file or directory that is managed by Apache Gravitino. Users can create a fileset + * object to manage the non-tabular data on the FS-like storage. The typical use case is to manage + * the training data for AI workloads. The major difference compare to the relational table is that + * the fileset is schema-free, the main property of the fileset is the storage location of the * underlying data. * *

{@link Fileset} defines the basic properties of a fileset object. A catalog implementation diff --git a/api/src/main/java/com/datastrato/gravitino/messaging/Topic.java b/api/src/main/java/com/datastrato/gravitino/messaging/Topic.java index db9c6486264..f33c8988aa8 100644 --- a/api/src/main/java/com/datastrato/gravitino/messaging/Topic.java +++ b/api/src/main/java/com/datastrato/gravitino/messaging/Topic.java @@ -26,8 +26,8 @@ /** * An interface representing a topic under a schema {@link com.datastrato.gravitino.Namespace}. A - * topic is a message queue that is managed by Gravitino. Users can create/drop/alter a topic on the - * Message Queue system like Kafka, Pulsar, etc. + * topic is a message queue that is managed by Apache Gravitino. Users can create/drop/alter a topic + * on the Message Queue system like Apache Kafka, Apache Pulsar, etc. * *

{@link Topic} defines the basic properties of a topic object. A catalog implementation with * {@link TopicCatalog} should implement this interface. diff --git a/api/src/main/java/com/datastrato/gravitino/rel/expressions/distributions/Distributions.java b/api/src/main/java/com/datastrato/gravitino/rel/expressions/distributions/Distributions.java index d09b1db6774..fc74d57dacd 100644 --- a/api/src/main/java/com/datastrato/gravitino/rel/expressions/distributions/Distributions.java +++ b/api/src/main/java/com/datastrato/gravitino/rel/expressions/distributions/Distributions.java @@ -23,7 +23,7 @@ import java.util.Arrays; import java.util.Objects; -/** Helper methods to create distributions to pass into Gravitino. */ +/** Helper methods to create distributions to pass into Apache Gravitino. */ public class Distributions { /** NONE is used to indicate that there is no distribution. */ diff --git a/api/src/main/java/com/datastrato/gravitino/rel/expressions/literals/Literals.java b/api/src/main/java/com/datastrato/gravitino/rel/expressions/literals/Literals.java index 4db95c86a5f..68327567d72 100644 --- a/api/src/main/java/com/datastrato/gravitino/rel/expressions/literals/Literals.java +++ b/api/src/main/java/com/datastrato/gravitino/rel/expressions/literals/Literals.java @@ -26,7 +26,7 @@ import java.time.LocalTime; import java.util.Objects; -/** The helper class to create literals to pass into Gravitino. */ +/** The helper class to create literals to pass into Apache Gravitino. */ public class Literals { /** Used to represent a null literal. */ diff --git a/api/src/main/java/com/datastrato/gravitino/rel/expressions/sorts/SortOrders.java b/api/src/main/java/com/datastrato/gravitino/rel/expressions/sorts/SortOrders.java index e2727f7f6a4..77b457a0bd8 100644 --- a/api/src/main/java/com/datastrato/gravitino/rel/expressions/sorts/SortOrders.java +++ b/api/src/main/java/com/datastrato/gravitino/rel/expressions/sorts/SortOrders.java @@ -21,7 +21,7 @@ import com.datastrato.gravitino.rel.expressions.Expression; import java.util.Objects; -/** Helper methods to create SortOrders to pass into Gravitino. */ +/** Helper methods to create SortOrders to pass into Apache Gravitino. */ public class SortOrders { /** NONE is used to indicate that there is no sort order. */ diff --git a/api/src/main/java/com/datastrato/gravitino/rel/expressions/transforms/Transforms.java b/api/src/main/java/com/datastrato/gravitino/rel/expressions/transforms/Transforms.java index e1a821ab253..11a07589fff 100644 --- a/api/src/main/java/com/datastrato/gravitino/rel/expressions/transforms/Transforms.java +++ b/api/src/main/java/com/datastrato/gravitino/rel/expressions/transforms/Transforms.java @@ -28,7 +28,7 @@ import java.util.Arrays; import java.util.Objects; -/** Helper methods to create logical transforms to pass into Gravitino. */ +/** Helper methods to create logical transforms to pass into Apache Gravitino. */ public class Transforms { /** An empty array of transforms. */ public static final Transform[] EMPTY_TRANSFORM = new Transform[0]; diff --git a/api/src/main/java/com/datastrato/gravitino/rel/indexes/Indexes.java b/api/src/main/java/com/datastrato/gravitino/rel/indexes/Indexes.java index 6ed0a1ad20b..416e9c933fe 100644 --- a/api/src/main/java/com/datastrato/gravitino/rel/indexes/Indexes.java +++ b/api/src/main/java/com/datastrato/gravitino/rel/indexes/Indexes.java @@ -18,7 +18,7 @@ */ package com.datastrato.gravitino.rel.indexes; -/** Helper methods to create index to pass into Gravitino. 
*/ +/** Helper methods to create index to pass into Apache Gravitino. */ public class Indexes { /** An empty array of indexes. */ diff --git a/api/src/main/java/com/datastrato/gravitino/rel/types/Decimal.java b/api/src/main/java/com/datastrato/gravitino/rel/types/Decimal.java index 702e85f4719..702f77751b8 100644 --- a/api/src/main/java/com/datastrato/gravitino/rel/types/Decimal.java +++ b/api/src/main/java/com/datastrato/gravitino/rel/types/Decimal.java @@ -26,7 +26,7 @@ import java.util.Objects; /** - * Used to represent a {@link Types.DecimalType} value in Gravitino. + * Used to represent a {@link Types.DecimalType} value in Apache Gravitino. * *

For Decimal, we expect the precision is equal to or larger than the scale, however, in {@link * BigDecimal}, the digit count starts from the leftmost nonzero digit of the exact result. For diff --git a/api/src/main/java/com/datastrato/gravitino/rel/types/Type.java b/api/src/main/java/com/datastrato/gravitino/rel/types/Type.java index f6b1145a1f2..ed49ae1845f 100644 --- a/api/src/main/java/com/datastrato/gravitino/rel/types/Type.java +++ b/api/src/main/java/com/datastrato/gravitino/rel/types/Type.java @@ -20,7 +20,7 @@ import com.datastrato.gravitino.annotation.Evolving; -/** An interface representing all data types supported by Gravitino. */ +/** An interface representing all data types supported by Apache Gravitino. */ @Evolving public interface Type { /** @return The generic name of the type. */ diff --git a/catalogs/bundled-catalog/src/main/java/com/datastrato/gravitino/catalog/common/ClassProvider.java b/catalogs/bundled-catalog/src/main/java/com/datastrato/gravitino/catalog/common/ClassProvider.java index 9cba50cba4c..a9deaff5a05 100644 --- a/catalogs/bundled-catalog/src/main/java/com/datastrato/gravitino/catalog/common/ClassProvider.java +++ b/catalogs/bundled-catalog/src/main/java/com/datastrato/gravitino/catalog/common/ClassProvider.java @@ -35,7 +35,7 @@ /** * The {@link ClassProvider} class serves as a container for the necessary classes used by the - * Gravitino query engine, with a primary focus on classes related to property metadata. + * Apache Gravitino query engine, with a primary focus on classes related to property metadata. * *

Purpose of this module and class: * diff --git a/catalogs/bundled-catalog/src/main/java/com/datastrato/gravitino/catalog/property/PropertyConverter.java b/catalogs/bundled-catalog/src/main/java/com/datastrato/gravitino/catalog/property/PropertyConverter.java index bb850ba8944..1194d1239a4 100644 --- a/catalogs/bundled-catalog/src/main/java/com/datastrato/gravitino/catalog/property/PropertyConverter.java +++ b/catalogs/bundled-catalog/src/main/java/com/datastrato/gravitino/catalog/property/PropertyConverter.java @@ -26,7 +26,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** Transforming between gravitino schema/table/column property and engine property. */ +/** Transforming between Apache Gravitino schema/table/column property and engine property. */ public abstract class PropertyConverter { protected static final String TRINO_PROPERTIES_PREFIX = "trino.bypass."; @@ -34,7 +34,7 @@ public abstract class PropertyConverter { private static final Logger LOG = LoggerFactory.getLogger(PropertyConverter.class); /** * Mapping that maps engine properties to Gravitino properties. It will return a map that holds - * the mapping between engine and gravitino properties. + * the mapping between engine and Gravitino properties. * * @return a map that holds the mapping from engine to Gravitino properties. */ diff --git a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalog.java b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalog.java index 3963089896e..c8f689fad77 100644 --- a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalog.java +++ b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalog.java @@ -26,7 +26,7 @@ import java.util.Map; import java.util.Optional; -/** Implementation of a Hive catalog in Gravitino. */ +/** Implementation of an Apache Hive catalog in Apache Gravitino. 
*/ public class HiveCatalog extends BaseCatalog { static final HiveCatalogPropertiesMeta CATALOG_PROPERTIES_METADATA = diff --git a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalogCapability.java b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalogCapability.java index b42a7478a9e..c6008483b8b 100644 --- a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalogCapability.java +++ b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalogCapability.java @@ -24,7 +24,7 @@ public class HiveCatalogCapability implements Capability { @Override public CapabilityResult columnNotNull() { - // The NOT NULL constraint for column is supported since Hive3.0, see + // The NOT NULL constraint for column is supported since Hive 3.0, see // https://issues.apache.org/jira/browse/HIVE-16575 return CapabilityResult.unsupported( "The NOT NULL constraint for column is only supported since Hive 3.0, " @@ -33,7 +33,7 @@ public CapabilityResult columnNotNull() { @Override public CapabilityResult columnDefaultValue() { - // The DEFAULT constraint for column is supported since Hive3.0, see + // The DEFAULT constraint for column is supported since Hive 3.0, see // https://issues.apache.org/jira/browse/HIVE-18726 return CapabilityResult.unsupported( "The DEFAULT constraint for column is only supported since Hive 3.0, " diff --git a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalogOperations.java b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalogOperations.java index 165ab896d16..c0b8865395e 100644 --- a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalogOperations.java +++ b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveCatalogOperations.java @@ -99,7 +99,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** Operations for interacting with the Hive catalog in Gravitino. */ +/** Operations for interacting with an Apache Hive catalog in Apache Gravitino. */ public class HiveCatalogOperations implements CatalogOperations, SupportsSchemas, TableCatalog { public static final Logger LOG = LoggerFactory.getLogger(HiveCatalogOperations.class); @@ -159,7 +159,7 @@ public void initialize( mergeConfig.putAll(gravitinoConfig); Configuration hadoopConf = new Configuration(); - // Set byPass first to make gravitino config overwrite it, only keys in byPassConfig + // Set byPass first to make Gravitino config overwrite it, only keys in byPassConfig // and gravitinoConfig will be passed to Hive config, and gravitinoConfig has higher priority mergeConfig.forEach(hadoopConf::set); hiveConf = new HiveConf(hadoopConf, HiveCatalogOperations.class); @@ -466,7 +466,7 @@ public HiveSchema alterSchema(NameIdentifier ident, SchemaChange... 
changes) } } - // alter the hive database parameters + // alter the Hive database parameters Database alteredDatabase = database.deepCopy(); alteredDatabase.setParameters(properties); diff --git a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveClientPool.java b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveClientPool.java index c5c7cae7043..a1c107fe802 100644 --- a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveClientPool.java +++ b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveClientPool.java @@ -35,7 +35,7 @@ // hive-metastore/src/main/java/org/apache/iceberg/hive/HiveClientPool.java -/** Represents a client pool for managing connections to the Hive Metastore service. */ +/** Represents a client pool for managing connections to an Apache Hive Metastore service. */ public class HiveClientPool extends ClientPoolImpl { private static final Logger LOG = LoggerFactory.getLogger(HiveClientPool.class); diff --git a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveColumn.java b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveColumn.java index 1fc1d4fea12..068a7fe721d 100644 --- a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveColumn.java +++ b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveColumn.java @@ -21,7 +21,7 @@ import com.datastrato.gravitino.connector.BaseColumn; import lombok.EqualsAndHashCode; -/** Represents a column in the Hive Metastore catalog. */ +/** Represents a column in an Apache Hive Metastore catalog. */ @EqualsAndHashCode(callSuper = true) public class HiveColumn extends BaseColumn { diff --git a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveSchema.java b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveSchema.java index 11c5c30afcd..cf79a290ecc 100644 --- a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveSchema.java +++ b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveSchema.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.PrincipalType; -/** Represents a Hive Schema (Database) entity in the Hive Metastore catalog. */ +/** Represents an Apache Hive Schema (Database) entity in the Hive Metastore catalog. */ @ToString public class HiveSchema extends BaseSchema { private Configuration conf; diff --git a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveTable.java b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveTable.java index 9b812b33a89..f7040aeb763 100644 --- a/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveTable.java +++ b/catalogs/catalog-hive/src/main/java/com/datastrato/gravitino/catalog/hive/HiveTable.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -/** Represents a Hive Table entity in the Hive Metastore catalog. */ +/** Represents an Apache Hive Table entity in the Hive Metastore catalog. 
*/ @ToString public class HiveTable extends BaseTable { diff --git a/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/integration/test/CatalogHiveIT.java b/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/integration/test/CatalogHiveIT.java index 6e5853a27e3..fe3b374d0b4 100644 --- a/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/integration/test/CatalogHiveIT.java +++ b/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/integration/test/CatalogHiveIT.java @@ -176,7 +176,7 @@ public static void startup() throws Exception { HiveConf hiveConf = new HiveConf(); hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, HIVE_METASTORE_URIS); - // Check if hive client can connect to hive metastore + // Check if Hive client can connect to Hive metastore hiveClientPool = new HiveClientPool(1, hiveConf); List dbs = hiveClientPool.run(client -> client.getAllDatabases()); Assertions.assertFalse(dbs.isEmpty()); @@ -298,7 +298,7 @@ private static void createSchema() throws TException, InterruptedException { Assertions.assertEquals("val2", loadSchema.properties().get("key2")); Assertions.assertNotNull(loadSchema.properties().get(HiveSchemaPropertiesMetadata.LOCATION)); - // Directly get database from hive metastore to verify the schema creation + // Directly get database from Hive metastore to verify the schema creation Database database = hiveClientPool.run(client -> client.getDatabase(schemaName)); Assertions.assertEquals(schemaName.toLowerCase(), database.getName()); Assertions.assertEquals(comment, database.getDescription()); @@ -387,7 +387,7 @@ public void testCreateHiveTableWithDistributionAndSortOrder() distribution, sortOrders); - // Directly get table from hive metastore to check if the table is created successfully. + // Directly get table from Hive metastore to check if the table is created successfully. org.apache.hadoop.hive.metastore.api.Table hiveTab = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); properties @@ -404,7 +404,7 @@ public void testCreateHiveTableWithDistributionAndSortOrder() .asTableCatalog() .createTable(nameIdentifier, columns, TABLE_COMMENT, properties, (Transform[]) null); - // Directly get table from hive metastore to check if the table is created successfully. + // Directly get table from Hive metastore to check if the table is created successfully. org.apache.hadoop.hive.metastore.api.Table hiveTable1 = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); properties @@ -468,7 +468,7 @@ public void testCreateHiveTable() throws TException, InterruptedException { .createTable( nameIdentifier, columns, TABLE_COMMENT, properties, Transforms.EMPTY_TRANSFORM); - // Directly get table from hive metastore to check if the table is created successfully. + // Directly get table from Hive metastore to check if the table is created successfully. org.apache.hadoop.hive.metastore.api.Table hiveTab = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); properties @@ -496,7 +496,7 @@ public void testCreateHiveTable() throws TException, InterruptedException { .asTableCatalog() .createTable(nameIdentifier, columns, TABLE_COMMENT, properties, (Transform[]) null); - // Directly get table from hive metastore to check if the table is created successfully. + // Directly get table from Hive metastore to check if the table is created successfully. 
org.apache.hadoop.hive.metastore.api.Table hiveTable1 = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); properties @@ -684,7 +684,7 @@ public void testCreatePartitionedHiveTable() throws TException, InterruptedExcep Transforms.identity(columns[1].name()), Transforms.identity(columns[2].name()) }); - // Directly get table from hive metastore to check if the table is created successfully. + // Directly get table from Hive metastore to check if the table is created successfully. org.apache.hadoop.hive.metastore.api.Table hiveTab = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); properties @@ -805,7 +805,7 @@ public void testGetPartition() throws TException, InterruptedException { Assertions.assertEquals( "hive_col_name2=2023-01-01/hive_col_name3=gravitino_it_test", partition.name()); - // Directly get partition from hive metastore + // Directly get partition from Hive metastore org.apache.hadoop.hive.metastore.api.Partition hivePartition = hiveClientPool.run( client -> client.getPartition(schemaName, createdTable.name(), partition.name())); @@ -915,7 +915,7 @@ public void testDropPartition() throws TException, InterruptedException, IOExcep IdentityPartition partitionAdded1 = (IdentityPartition) createdTable.supportPartitions().addPartition(identity1); - // Directly get partition from hive metastore to check if the partition is created successfully. + // Directly get partition from Hive metastore to check if the partition is created successfully. org.apache.hadoop.hive.metastore.api.Partition partitionGot1 = hiveClientPool.run( client -> client.getPartition(schemaName, createdTable.name(), partitionAdded1.name())); @@ -934,7 +934,7 @@ public void testDropPartition() throws TException, InterruptedException, IOExcep Partitions.identity(new String[][] {field5, field6}, new Literal[] {literal5, literal6}); IdentityPartition partitionAdded2 = (IdentityPartition) createdTable.supportPartitions().addPartition(identity2); - // Directly get partition from hive metastore to check if the partition is created successfully. + // Directly get partition from Hive metastore to check if the partition is created successfully. org.apache.hadoop.hive.metastore.api.Partition partitionGot2 = hiveClientPool.run( client -> client.getPartition(schemaName, createdTable.name(), partitionAdded2.name())); @@ -1103,7 +1103,8 @@ public void testAlterHiveTable() throws TException, InterruptedException { new String[] {HIVE_COL_NAME1}, Types.IntegerType.get())); Assertions.assertEquals(AuthConstants.ANONYMOUS_USER, alteredTable.auditInfo().creator()); Assertions.assertEquals(AuthConstants.ANONYMOUS_USER, alteredTable.auditInfo().lastModifier()); - // Direct get table from hive metastore to check if the table is altered successfully. + + // Direct get table from Hive metastore to check if the table is altered successfully. org.apache.hadoop.hive.metastore.api.Table hiveTab = hiveClientPool.run(client -> client.getTable(schemaName, ALTER_TABLE_NAME)); Assertions.assertEquals(schemaName.toLowerCase(), hiveTab.getDbName()); @@ -1252,7 +1253,7 @@ public void testDropHiveTable() { Transforms.EMPTY_TRANSFORM); catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, ALTER_TABLE_NAME)); - // Directly get table from hive metastore to check if the table is dropped successfully. + // Directly get table from Hive metastore to check if the table is dropped successfully. 
assertThrows( NoSuchObjectException.class, () -> hiveClientPool.run(client -> client.getTable(schemaName, ALTER_TABLE_NAME))); @@ -1463,7 +1464,8 @@ public void testDropHiveManagedTable() throws TException, InterruptedException, TABLE_COMMENT, createProperties(), new Transform[] {Transforms.identity(columns[2].name())}); - // Directly get table from hive metastore to check if the table is created successfully. + + // Directly get table from Hive metastore to check if the table is created successfully. org.apache.hadoop.hive.metastore.api.Table hiveTab = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); checkTableReadWrite(hiveTab); @@ -1471,7 +1473,7 @@ public void testDropHiveManagedTable() throws TException, InterruptedException, Path tableDirectory = new Path(hiveTab.getSd().getLocation()); catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); Boolean existed = hiveClientPool.run(client -> client.tableExists(schemaName, tableName)); - Assertions.assertFalse(existed, "The hive table should not exist"); + Assertions.assertFalse(existed, "The Hive table should not exist"); Assertions.assertFalse(hdfs.exists(tableDirectory), "The table directory should not exist"); } @@ -1486,7 +1488,7 @@ public void testDropHiveExternalTable() throws TException, InterruptedException, TABLE_COMMENT, ImmutableMap.of(TABLE_TYPE, EXTERNAL_TABLE.name().toLowerCase(Locale.ROOT)), new Transform[] {Transforms.identity(columns[2].name())}); - // Directly get table from hive metastore to check if the table is created successfully. + // Directly get table from Hive metastore to check if the table is created successfully. org.apache.hadoop.hive.metastore.api.Table hiveTab = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); checkTableReadWrite(hiveTab); @@ -1511,14 +1513,15 @@ public void testPurgeHiveManagedTable() throws TException, InterruptedException, TABLE_COMMENT, createProperties(), new Transform[] {Transforms.identity(columns[2].name())}); - // Directly get table from hive metastore to check if the table is created successfully. + + // Directly get table from Hive metastore to check if the table is created successfully. org.apache.hadoop.hive.metastore.api.Table hiveTab = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); checkTableReadWrite(hiveTab); Assertions.assertEquals(MANAGED_TABLE.name(), hiveTab.getTableType()); catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); Boolean existed = hiveClientPool.run(client -> client.tableExists(schemaName, tableName)); - Assertions.assertFalse(existed, "The hive table should not exist"); + Assertions.assertFalse(existed, "The Hive table should not exist"); Path tableDirectory = new Path(hiveTab.getSd().getLocation()); Assertions.assertFalse(hdfs.exists(tableDirectory), "The table directory should not exist"); Path trashDirectory = hdfs.getTrashRoot(tableDirectory); @@ -1536,7 +1539,8 @@ public void testPurgeHiveExternalTable() throws TException, InterruptedException TABLE_COMMENT, ImmutableMap.of(TABLE_TYPE, EXTERNAL_TABLE.name().toLowerCase(Locale.ROOT)), new Transform[] {Transforms.identity(columns[2].name())}); - // Directly get table from hive metastore to check if the table is created successfully. + + // Directly get table from Hive metastore to check if the table is created successfully. 
org.apache.hadoop.hive.metastore.api.Table hiveTab = hiveClientPool.run(client -> client.getTable(schemaName, tableName)); checkTableReadWrite(hiveTab); @@ -1548,7 +1552,7 @@ public void testPurgeHiveExternalTable() throws TException, InterruptedException () -> { tableCatalog.purgeTable(id); }, - "Can't purge a external hive table"); + "Can't purge a external Hive table"); Boolean existed = hiveClientPool.run(client -> client.tableExists(schemaName, tableName)); Assertions.assertTrue(existed, "The table should be still exist"); @@ -1569,7 +1573,7 @@ public void testRemoveNonExistTable() throws TException, InterruptedException { ImmutableMap.of(TABLE_TYPE, EXTERNAL_TABLE.name().toLowerCase(Locale.ROOT)), new Transform[] {Transforms.identity(columns[2].name())}); - // Directly drop table from hive metastore. + // Directly drop table from Hive metastore. hiveClientPool.run( client -> { client.dropTable(schemaName, tableName, true, false, false); @@ -1598,7 +1602,7 @@ public void testPurgeNonExistTable() throws TException, InterruptedException { ImmutableMap.of(TABLE_TYPE, EXTERNAL_TABLE.name().toLowerCase(Locale.ROOT)), new Transform[] {Transforms.identity(columns[2].name())}); - // Directly drop table from hive metastore. + // Directly drop table from Hive metastore. hiveClientPool.run( client -> { client.dropTable(schemaName, tableName, true, false, true); diff --git a/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/integration/test/ProxyCatalogHiveIT.java b/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/integration/test/ProxyCatalogHiveIT.java index ee9305065ea..9212ea3c813 100644 --- a/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/integration/test/ProxyCatalogHiveIT.java +++ b/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/integration/test/ProxyCatalogHiveIT.java @@ -106,7 +106,7 @@ public static void startIntegrationTest() throws Exception { HiveConf hiveConf = new HiveConf(); hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, HIVE_METASTORE_URIS); - // Check if hive client can connect to hive metastore + // Check if Hive client can connect to Hive metastore hiveClientPool = new HiveClientPool(1, hiveConf); Configuration conf = new Configuration(); diff --git a/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/miniHMS/MiniHiveMetastore.java b/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/miniHMS/MiniHiveMetastore.java index 52a824ad2c9..0f167757c0b 100644 --- a/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/miniHMS/MiniHiveMetastore.java +++ b/catalogs/catalog-hive/src/test/java/com/datastrato/gravitino/catalog/hive/miniHMS/MiniHiveMetastore.java @@ -119,7 +119,7 @@ public class MiniHiveMetastore { } })); } catch (Exception e) { - throw new RuntimeException("Failed to setup local dir for hive metastore", e); + throw new RuntimeException("Failed to setup local dir for Hive metastore", e); } } @@ -160,7 +160,7 @@ public void start() { * Starts a TestHiveMetastore with the default connection pool size (5) with the provided Hive * configuration. * - * @param conf The hive configuration to use. + * @param conf The Hive configuration to use. */ public void start(HiveConf conf) { start(conf, DEFAULT_POOL_SIZE); @@ -169,7 +169,7 @@ public void start(HiveConf conf) { /** * Starts a TestHiveMetastore with a provided connection pool size and Hive configuration. * - * @param conf The hive configuration to use. 
+ * @param conf The Hive configuration to use. * @param poolSize The number of threads in the executor pool. */ public void start(HiveConf conf, int poolSize) { diff --git a/catalogs/catalog-jdbc-common/src/main/java/com/datastrato/gravitino/catalog/jdbc/JdbcCatalogOperations.java b/catalogs/catalog-jdbc-common/src/main/java/com/datastrato/gravitino/catalog/jdbc/JdbcCatalogOperations.java index 14ab7369192..135952c26df 100644 --- a/catalogs/catalog-jdbc-common/src/main/java/com/datastrato/gravitino/catalog/jdbc/JdbcCatalogOperations.java +++ b/catalogs/catalog-jdbc-common/src/main/java/com/datastrato/gravitino/catalog/jdbc/JdbcCatalogOperations.java @@ -73,11 +73,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** Operations for interacting with the Jdbc catalog in Gravitino. */ +/** Operations for interacting with the Jdbc catalog in Apache Gravitino. */ public class JdbcCatalogOperations implements CatalogOperations, SupportsSchemas, TableCatalog { private static final String GRAVITINO_ATTRIBUTE_DOES_NOT_EXIST_MSG = - "The gravitino id attribute does not exist in properties"; + "The Gravitino id attribute does not exist in properties"; public static final Logger LOG = LoggerFactory.getLogger(JdbcCatalogOperations.class); @@ -223,7 +223,7 @@ public JdbcSchema loadSchema(NameIdentifier ident) throws NoSuchSchemaException String comment = load.comment(); StringIdentifier id = StringIdentifier.fromComment(comment); if (id == null) { - LOG.warn("The comment {} does not contain gravitino id attribute", comment); + LOG.warn("The comment {} does not contain Gravitino id attribute", comment); return load; } Map properties = @@ -298,7 +298,7 @@ public Table loadTable(NameIdentifier tableIdent) throws NoSuchTableException { StringIdentifier id = StringIdentifier.fromComment(comment); if (id == null) { LOG.warn( - "The table {} comment {} does not contain gravitino id attribute", tableName, comment); + "The table {} comment {} does not contain Gravitino id attribute", tableName, comment); } else { properties = StringIdentifier.newPropertiesWithId(id, properties); // Remove id from comment diff --git a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/DorisCatalog.java b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/DorisCatalog.java index 68a9e5826ad..22532b05683 100644 --- a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/DorisCatalog.java +++ b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/DorisCatalog.java @@ -34,7 +34,7 @@ import com.datastrato.gravitino.connector.capability.Capability; import java.util.Map; -/** Implementation of a Doris catalog in Gravitino. */ +/** Implementation of an Apache Doris catalog in Apache Gravitino. 
*/ public class DorisCatalog extends JdbcCatalog { @Override diff --git a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/converter/DorisExceptionConverter.java b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/converter/DorisExceptionConverter.java index de47c940025..856015c4fb5 100644 --- a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/converter/DorisExceptionConverter.java +++ b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/converter/DorisExceptionConverter.java @@ -30,7 +30,7 @@ import java.sql.SQLException; import java.util.regex.Pattern; -/** Exception converter to Gravitino exception for Doris. */ +/** Exception converter to Apache Gravitino exception for Apache Doris. */ public class DorisExceptionConverter extends JdbcExceptionConverter { // see: https://doris.apache.org/docs/admin-manual/maint-monitor/doris-error-code/ diff --git a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/converter/DorisTypeConverter.java b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/converter/DorisTypeConverter.java index e2524982626..9b7386b22ee 100644 --- a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/converter/DorisTypeConverter.java +++ b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/converter/DorisTypeConverter.java @@ -22,7 +22,7 @@ import com.datastrato.gravitino.rel.types.Type; import com.datastrato.gravitino.rel.types.Types; -/** Type converter for Doris. */ +/** Type converter for Apache Doris. */ public class DorisTypeConverter extends JdbcTypeConverter { static final String BOOLEAN = "boolean"; static final String TINYINT = "tinyint"; diff --git a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/operation/DorisDatabaseOperations.java b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/operation/DorisDatabaseOperations.java index 8bb787d6685..c9e9c3962b8 100644 --- a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/operation/DorisDatabaseOperations.java +++ b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/operation/DorisDatabaseOperations.java @@ -36,7 +36,7 @@ import java.util.Set; import org.apache.commons.lang3.StringUtils; -/** Database operations for Doris. */ +/** Database operations for Apache Doris. */ public class DorisDatabaseOperations extends JdbcDatabaseOperations { public static final String COMMENT_KEY = "comment"; diff --git a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/operation/DorisTableOperations.java b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/operation/DorisTableOperations.java index 649f099552b..8f2490e84df 100644 --- a/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/operation/DorisTableOperations.java +++ b/catalogs/catalog-jdbc-doris/src/main/java/com/datastrato/gravitino/catalog/doris/operation/DorisTableOperations.java @@ -57,7 +57,7 @@ import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -/** Table operations for Doris. */ +/** Table operations for Apache Doris. 
*/ public class DorisTableOperations extends JdbcTableOperations { private static final String BACK_QUOTE = "`"; private static final String DORIS_AUTO_INCREMENT = "AUTO_INCREMENT"; @@ -468,7 +468,7 @@ protected String generateAlterTableSql( if (null != updateComment) { String newComment = updateComment.getNewComment(); if (null == StringIdentifier.fromComment(newComment)) { - // Detect and add gravitino id. + // Detect and add Gravitino id. JdbcTable jdbcTable = getOrCreateTable(databaseName, tableName, lazyLoadTable); StringIdentifier identifier = StringIdentifier.fromComment(jdbcTable.comment()); if (null != identifier) { diff --git a/catalogs/catalog-jdbc-doris/src/test/java/com/datastrato/gravitino/catalog/doris/integration/test/CatalogDorisIT.java b/catalogs/catalog-jdbc-doris/src/test/java/com/datastrato/gravitino/catalog/doris/integration/test/CatalogDorisIT.java index 1a2744c6d71..cde55fbccf1 100644 --- a/catalogs/catalog-jdbc-doris/src/test/java/com/datastrato/gravitino/catalog/doris/integration/test/CatalogDorisIT.java +++ b/catalogs/catalog-jdbc-doris/src/test/java/com/datastrato/gravitino/catalog/doris/integration/test/CatalogDorisIT.java @@ -247,7 +247,7 @@ void testDropDorisSchema() { .createTable( NameIdentifier.of(schemaName, tableName), createColumns(), - "Created by gravitino client", + "Created by Gravitino client", createTableProperties(), Transforms.EMPTY_TRANSFORM, createDistribution(), diff --git a/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/MysqlCatalog.java b/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/MysqlCatalog.java index 901cb6b2828..9a788c71536 100644 --- a/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/MysqlCatalog.java +++ b/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/MysqlCatalog.java @@ -35,7 +35,7 @@ import com.datastrato.gravitino.connector.capability.Capability; import java.util.Map; -/** Implementation of a Mysql catalog in Gravitino. */ +/** Implementation of a Mysql catalog in Apache Gravitino. */ public class MysqlCatalog extends JdbcCatalog { private static final MysqlTablePropertiesMetadata TABLE_PROPERTIES_META = diff --git a/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/converter/MysqlExceptionConverter.java b/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/converter/MysqlExceptionConverter.java index 89fd3f171ca..8b68f620a44 100644 --- a/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/converter/MysqlExceptionConverter.java +++ b/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/converter/MysqlExceptionConverter.java @@ -26,7 +26,7 @@ import com.datastrato.gravitino.exceptions.TableAlreadyExistsException; import java.sql.SQLException; -/** Exception converter to Gravitino exception for MySQL. */ +/** Exception converter to Apache Gravitino exception for MySQL. 
*/ public class MysqlExceptionConverter extends JdbcExceptionConverter { @SuppressWarnings("FormatStringAnnotation") diff --git a/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/operation/MysqlTableOperations.java b/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/operation/MysqlTableOperations.java index 64d4b98e144..f9ccead593a 100644 --- a/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/operation/MysqlTableOperations.java +++ b/catalogs/catalog-jdbc-mysql/src/main/java/com/datastrato/gravitino/catalog/mysql/operation/MysqlTableOperations.java @@ -373,7 +373,7 @@ protected String generateAlterTableSql( if (null != updateComment) { String newComment = updateComment.getNewComment(); if (null == StringIdentifier.fromComment(newComment)) { - // Detect and add gravitino id. + // Detect and add Gravitino id. JdbcTable jdbcTable = getOrCreateTable(databaseName, tableName, lazyLoadTable); StringIdentifier identifier = StringIdentifier.fromComment(jdbcTable.comment()); if (null != identifier) { diff --git a/catalogs/catalog-jdbc-mysql/src/test/java/com/datastrato/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java b/catalogs/catalog-jdbc-mysql/src/test/java/com/datastrato/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java index 00c560d044b..513b499f84c 100644 --- a/catalogs/catalog-jdbc-mysql/src/test/java/com/datastrato/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java +++ b/catalogs/catalog-jdbc-mysql/src/test/java/com/datastrato/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java @@ -826,7 +826,7 @@ void testDropMySQLDatabase() { .createTable( NameIdentifier.of(schemaName, tableName), createColumns(), - "Created by gravitino client", + "Created by Gravitino client", ImmutableMap.builder().build()); // Try to drop a database, and cascade equals to false, it should not be diff --git a/catalogs/catalog-jdbc-postgresql/src/main/java/com/datastrato/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java b/catalogs/catalog-jdbc-postgresql/src/main/java/com/datastrato/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java index f581b86cbb2..a4628ccd03f 100644 --- a/catalogs/catalog-jdbc-postgresql/src/main/java/com/datastrato/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java +++ b/catalogs/catalog-jdbc-postgresql/src/main/java/com/datastrato/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java @@ -453,7 +453,7 @@ private String updateCommentDefinition( TableChange.UpdateComment updateComment, JdbcTable jdbcTable) { String newComment = updateComment.getNewComment(); if (null == StringIdentifier.fromComment(newComment)) { - // Detect and add gravitino id. + // Detect and add Gravitino id. 
if (StringUtils.isNotEmpty(jdbcTable.comment())) { StringIdentifier identifier = StringIdentifier.fromComment(jdbcTable.comment()); if (null != identifier) { @@ -632,7 +632,7 @@ private List addColumnFieldDefinition( // Append position if available if (!(addColumn.getPosition() instanceof TableChange.Default)) { throw new IllegalArgumentException( - "PostgreSQL does not support column position in gravitino."); + "PostgreSQL does not support column position in Gravitino."); } result.add(columnDefinition.append(";").toString()); diff --git a/catalogs/catalog-kafka/src/main/java/com/datastrato/gravitino/catalog/kafka/KafkaCatalog.java b/catalogs/catalog-kafka/src/main/java/com/datastrato/gravitino/catalog/kafka/KafkaCatalog.java index d428e33b624..53152e0fcb3 100644 --- a/catalogs/catalog-kafka/src/main/java/com/datastrato/gravitino/catalog/kafka/KafkaCatalog.java +++ b/catalogs/catalog-kafka/src/main/java/com/datastrato/gravitino/catalog/kafka/KafkaCatalog.java @@ -25,7 +25,9 @@ import com.datastrato.gravitino.connector.capability.Capability; import java.util.Map; -/** Kafka catalog is a messaging catalog that can manage topics on the Kafka messaging system. */ +/** + * Kafka catalog is a messaging catalog that can manage topics on the Apache Kafka messaging system. + */ public class KafkaCatalog extends BaseCatalog { static final KafkaCatalogPropertiesMetadata CATALOG_PROPERTIES_METADATA = diff --git a/catalogs/catalog-kafka/src/main/java/com/datastrato/gravitino/catalog/kafka/KafkaCatalogOperations.java b/catalogs/catalog-kafka/src/main/java/com/datastrato/gravitino/catalog/kafka/KafkaCatalogOperations.java index 8254d94cf52..e34c76fd0fe 100644 --- a/catalogs/catalog-kafka/src/main/java/com/datastrato/gravitino/catalog/kafka/KafkaCatalogOperations.java +++ b/catalogs/catalog-kafka/src/main/java/com/datastrato/gravitino/catalog/kafka/KafkaCatalogOperations.java @@ -142,7 +142,7 @@ public void initialize( adminClientConfig.putAll(bypassConfigs); adminClientConfig.put( AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, config.get(BOOTSTRAP_SERVERS)); - // use gravitino catalog id as the admin client id + // use Gravitino catalog id as the admin client id adminClientConfig.put( AdminClientConfig.CLIENT_ID_CONFIG, String.format(CLIENT_ID_TEMPLATE, config.get(ID_KEY), info.namespace(), info.name())); diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergCatalog.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergCatalog.java index 9ec01954f0d..1332ffbd008 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergCatalog.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergCatalog.java @@ -24,7 +24,7 @@ import com.datastrato.gravitino.connector.capability.Capability; import java.util.Map; -/** Implementation of an Iceberg catalog in Gravitino. */ +/** Implementation of an Apache Iceberg catalog in Apache Gravitino. 
*/ public class IcebergCatalog extends BaseCatalog { static final IcebergCatalogPropertiesMetadata CATALOG_PROPERTIES_META = diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergCatalogOperations.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergCatalogOperations.java index 402c864cbf0..fc19399b0d9 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergCatalogOperations.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergCatalogOperations.java @@ -71,7 +71,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** Operations for interacting with the Iceberg catalog in Gravitino. */ +/** Operations for interacting with an Apache Iceberg catalog in Apache Gravitino. */ public class IcebergCatalogOperations implements CatalogOperations, SupportsSchemas, TableCatalog { private static final String ICEBERG_TABLE_DOES_NOT_EXIST_MSG = "Iceberg table does not exist: %s"; diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergColumn.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergColumn.java index c8218ada057..a84e96b128c 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergColumn.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergColumn.java @@ -21,7 +21,7 @@ import com.datastrato.gravitino.connector.BaseColumn; import lombok.EqualsAndHashCode; -/** Represents a column in the Iceberg column. */ +/** Represents a column in an Apache Iceberg table. */ @EqualsAndHashCode(callSuper = true) public class IcebergColumn extends BaseColumn { diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergHiveCachedClientPool.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergHiveCachedClientPool.java index d779bf1b5fe..dc43316adc7 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergHiveCachedClientPool.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergHiveCachedClientPool.java @@ -71,11 +71,11 @@ * } * } * - * Why do we need to do this? Because the original client pool in iceberg uses a fixed username to - * create the client pool (please see the key in the method clientPool()). Assuming the original - * name is A and when a new user B tries to call the clientPool() method, it will use the connection - * that belongs to A. This will not work with kerberos authentication as it will change the user - * name. + * Why do we need to do this? Because the original client pool in Apache Iceberg uses a fixed + * username to create the client pool (please see the key in the method clientPool()). Assuming the + * original name is A and when a new user B tries to call the clientPool() method, it will use the + * connection that belongs to A. This will not work with kerberos authentication as it will change + * the user name.
*/ public class IcebergHiveCachedClientPool implements ClientPool, Closeable { diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergSchema.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergSchema.java index 54570b73a3e..3f32ba88f33 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergSchema.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergSchema.java @@ -26,7 +26,7 @@ import org.apache.iceberg.catalog.Namespace; import org.apache.iceberg.rest.requests.CreateNamespaceRequest; -/** Represents an Iceberg Schema (Database) entity in the Iceberg schema. */ +/** Represents an Apache Iceberg Schema (Database) entity in the Iceberg schema. */ @ToString public class IcebergSchema extends BaseSchema { diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergTable.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergTable.java index 874e23a0c51..4047d977e6a 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergTable.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergTable.java @@ -47,7 +47,7 @@ import org.apache.iceberg.TableMetadata; import org.apache.iceberg.rest.requests.CreateTableRequest; -/** Represents an Iceberg Table entity in the Iceberg table. */ +/** Represents an Apache Iceberg Table entity in the Iceberg table. */ @ToString @Getter public class IcebergTable extends BaseTable { @@ -107,7 +107,7 @@ public CreateTableRequest toCreateTableRequest() { } /** - * Transforms the gravitino distribution to the distribution mode name of the Iceberg table. + * Transforms the Gravitino distribution to the distribution mode name of the Iceberg table. * * @param distribution The distribution of the table. * @return The distribution mode name of the iceberg table. 
diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergTablePropertiesMetadata.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergTablePropertiesMetadata.java index 70aa5583771..dad6ba1a205 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergTablePropertiesMetadata.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/IcebergTablePropertiesMetadata.java @@ -69,7 +69,7 @@ public class IcebergTablePropertiesMetadata extends BasePropertiesMetadata { FORMAT_VERSION, "The Iceberg table format version, ", false, null, false, false), stringImmutablePropertyEntry( PROVIDER, - "Iceberg provider for Iceberg table fileFormat, such as parquet, orc, avro, iceberg", + "Iceberg provider for Iceberg table fileFormat, such as Parquet, ORC, Avro, or Iceberg", false, null, false, diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/authentication/kerberos/HiveBackendProxy.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/authentication/kerberos/HiveBackendProxy.java index 08f02f3b33f..da9ed0c2e70 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/authentication/kerberos/HiveBackendProxy.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/authentication/kerberos/HiveBackendProxy.java @@ -58,7 +58,7 @@ public HiveBackendProxy( proxyUser = UserGroupInformation.getCurrentUser(); // Replace the original client pool with IcebergHiveCachedClientPool. Why do we need to do - // this? Because the original client pool in iceberg uses a fixed username to create the + // this? Because the original client pool in Iceberg uses a fixed username to create the // client pool, and it will not work with kerberos authentication. We need to create a new // client pool with the current user. For more, please see CachedClientPool#clientPool and // notice the value of `key` diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ConvertUtil.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ConvertUtil.java index 99b94c7418f..9a2fb9e91a1 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ConvertUtil.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ConvertUtil.java @@ -30,7 +30,7 @@ public class ConvertUtil { /** - * Convert the Iceberg Table to the corresponding schema information in the Iceberg. + * Convert an Apache Iceberg Table to the corresponding schema information in Iceberg. * * @param gravitinoTable Gravitino table of Iceberg. * @return Iceberg schema. @@ -47,7 +47,7 @@ public static Schema toIcebergSchema(IcebergTable gravitinoTable) { * Convert the nested field of Iceberg to the Iceberg column. * * @param nestedField Iceberg nested field.
- * @return Gravitino iceberg column + * @return Gravitino Iceberg column */ public static IcebergColumn fromNestedField(Types.NestedField nestedField) { return IcebergColumn.builder() @@ -59,9 +59,9 @@ public static IcebergColumn fromNestedField(Types.NestedField nestedField) { } /** - * Convert the Gravitino iceberg table to the Gravitino StructType + * Convert the Gravitino Iceberg table to the Gravitino StructType * - * @param icebergTable Gravitino iceberg table + * @param icebergTable Gravitino Iceberg table * @return Gravitino StructType */ private static com.datastrato.gravitino.rel.types.Types.StructType toGravitinoStructType( diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/DescribeIcebergSortOrderVisitor.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/DescribeIcebergSortOrderVisitor.java index f16f2493945..791f7a3347f 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/DescribeIcebergSortOrderVisitor.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/DescribeIcebergSortOrderVisitor.java @@ -23,7 +23,7 @@ import org.apache.iceberg.transforms.SortOrderVisitor; /** - * Convert expressions of Iceberg SortOrders to function string. + * Convert expressions of Apache Iceberg SortOrders to function string. * *

Referred from org/apache/iceberg/spark/Spark3Util/DescribeSortOrderVisitor.java */ diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergPartitionSpec.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergPartitionSpec.java index e27f6867bb8..57fa9ff64d5 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergPartitionSpec.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergPartitionSpec.java @@ -89,7 +89,7 @@ public Transform unknown(int fieldId, String sourceName, int sourceId, String tr } /** - * Transform assembled into gravitino. + * Transform assembled into Gravitino. * * @param partitionSpec Iceberg partition spec. * @param schema Iceberg schema. diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergSortOrder.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergSortOrder.java index d602d2c11d8..3f45ef52e4d 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergSortOrder.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergSortOrder.java @@ -33,7 +33,7 @@ import org.apache.iceberg.transforms.SortOrderVisitor; /** - * Implement iceberg sort order converter to gravitino sort order. + * Implement Apache Iceberg sort order converter to Apache Gravitino sort order. * *

Referred from core/src/main/java/org/apache/iceberg/spark/SortOrderToSpark.java */ diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergType.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergType.java index 39fd103482b..06051e95571 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergType.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/FromIcebergType.java @@ -26,7 +26,7 @@ import org.apache.iceberg.types.Types; /** - * Implement a type converter to convert types in Iceberg. + * Implement a type converter to convert types in Apache Iceberg. * *

Referred from core/src/main/java/org/apache/iceberg/spark/TypeToSparkType.java */ diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergPartitionSpec.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergPartitionSpec.java index 1bc3439a0e9..71588669efa 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergPartitionSpec.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergPartitionSpec.java @@ -25,15 +25,15 @@ import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; -/** Convert Gravitino Transforms to Iceberg PartitionSpec. */ +/** Convert Apache Gravitino Transforms to an Apache Iceberg PartitionSpec. */ public class ToIcebergPartitionSpec { private static final String DOT = "."; /** - * Convert iceberg table to iceberg partition spec through gravitino. + * Convert an Iceberg table to an Iceberg partition spec through Gravitino. * - * @param icebergTable the iceberg table. + * @param icebergTable the Iceberg table. * @return a PartitionSpec */ public static PartitionSpec toPartitionSpec(IcebergTable icebergTable) { @@ -42,7 +42,7 @@ public static PartitionSpec toPartitionSpec(IcebergTable icebergTable) { } /** - * Converts gravitino transforms into a {@link PartitionSpec}. + * Converts Gravitino transforms into a {@link PartitionSpec}. * * @param schema the table schema * @param partitioning Gravitino Transforms diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergSortOrder.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergSortOrder.java index 16bc1f3e1b4..fd838ffa1b0 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergSortOrder.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergSortOrder.java @@ -34,7 +34,7 @@ import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.expressions.UnboundTerm; -/** Implement gravitino sort order converter to iceberg sort order. */ +/** Implement Apache Gravitino sort order converter to Apache Iceberg sort order. */ public class ToIcebergSortOrder { private ToIcebergSortOrder() {} diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergType.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergType.java index e435f2f963c..871a8422f87 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergType.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergType.java @@ -24,7 +24,7 @@ import org.apache.iceberg.types.Types; /** - * Convert Gravitino types to iceberg types. + * Convert Apache Gravitino types to Apache Iceberg types. * *

Referred from core/src/main/java/org/apache/iceberg/spark/SparkTypeToType.java */ diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergTypeVisitor.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergTypeVisitor.java index 06b93208c9e..96e7e38bfc9 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergTypeVisitor.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/converter/ToIcebergTypeVisitor.java @@ -25,7 +25,7 @@ import java.util.List; /** - * Type converter belonging to gravitino. + * Type converter belonging to Apache Gravitino. * *

Referred from core/src/main/java/org/apache/iceberg/spark/SparkTypeVisitor.java */ @@ -34,7 +34,7 @@ public class ToIcebergTypeVisitor { /** * Traverse the Gravitino data type and convert the fields into Iceberg fields. * - * @param type Gravitino a data type in a gravitino. + * @param type a data type in Gravitino. * @param visitor Visitor of Iceberg type * @param <T> Iceberg type * @return Iceberg type diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/ops/IcebergTableOpsHelper.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/ops/IcebergTableOpsHelper.java index 945926924d8..a6c896d83e1 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/ops/IcebergTableOpsHelper.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/ops/IcebergTableOpsHelper.java @@ -193,7 +193,7 @@ private void doAddColumn( if (parentName != null) { org.apache.iceberg.types.Type parent = icebergTableSchema.findType(parentName); Preconditions.checkArgument( - parent != null, "Couldn't find parent field: " + parentName + " in iceberg table"); + parent != null, "Couldn't find parent field: " + parentName + " in Iceberg table"); Preconditions.checkArgument( parent instanceof StructType, "Couldn't add column to non-struct field, name:" diff --git a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/web/metrics/IcebergMetricsStore.java b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/web/metrics/IcebergMetricsStore.java index 425a50b6015..dab8f11abdd 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/web/metrics/IcebergMetricsStore.java +++ b/catalogs/catalog-lakehouse-iceberg/src/main/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/web/metrics/IcebergMetricsStore.java @@ -24,7 +24,7 @@ import java.util.Map; import org.apache.iceberg.metrics.MetricsReport; -/** A store API to save Iceberg metrics. */ +/** A store API to save Apache Iceberg metrics.
*/ public interface IcebergMetricsStore { /** diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/integration/test/IcebergRESTServiceIT.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/integration/test/IcebergRESTServiceIT.java index 2103853cccd..a099aafdc8a 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/integration/test/IcebergRESTServiceIT.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/com/datastrato/gravitino/catalog/lakehouse/iceberg/integration/test/IcebergRESTServiceIT.java @@ -123,7 +123,7 @@ void testCreateNamespace() { String properties = databaseInfo.getOrDefault("Properties", ""); switch (catalogType) { case HIVE: - // hive add more properties, like: + // Hive adds more properties, like: // ((hive.metastore.database.owner,hive), (hive.metastore.database.owner-type,USER)) Assertions.assertTrue(properties.contains("(ID,001), (Name,John)")); break; diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/GravitinoPaimonColumn.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/GravitinoPaimonColumn.java index 1b5c5eb834f..4fe1837919e 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/GravitinoPaimonColumn.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/GravitinoPaimonColumn.java @@ -30,7 +30,7 @@ import org.apache.paimon.types.DataType; import org.apache.paimon.types.RowType; -/** Implementation of {@link Column} that represents a column in the Paimon column. */ +/** Implementation of {@link Column} that represents a column in an Apache Paimon table. */ @EqualsAndHashCode(callSuper = true) public class GravitinoPaimonColumn extends BaseColumn { diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/GravitinoPaimonTable.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/GravitinoPaimonTable.java index 9d6f26d6679..b375e7e2ddb 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/GravitinoPaimonTable.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/GravitinoPaimonTable.java @@ -31,7 +31,10 @@ import org.apache.paimon.table.Table; import org.apache.paimon.types.DataField; -/** Implementation of {@link Table} that represents a Paimon Table entity in the Paimon table. */ +/** + * Implementation of {@link Table} that represents an Apache Paimon Table entity in the Paimon + * table.
+ */ @ToString @Getter public class GravitinoPaimonTable extends BaseTable { diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalog.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalog.java index 7d404bdc880..a2c113fba06 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalog.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalog.java @@ -25,7 +25,9 @@ import com.datastrato.gravitino.connector.capability.Capability; import java.util.Map; -/** Implementation of {@link Catalog} that represents a Paimon catalog in Gravitino. */ +/** + * Implementation of {@link Catalog} that represents an Apache Paimon catalog in Apache Gravitino. + */ public class PaimonCatalog extends BaseCatalog { static final PaimonCatalogPropertiesMetadata CATALOG_PROPERTIES_META = diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalogBackend.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalogBackend.java index 43bd298fd9c..6d756b13d14 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalogBackend.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalogBackend.java @@ -18,7 +18,7 @@ */ package com.datastrato.gravitino.catalog.lakehouse.paimon; -/** The type of Paimon catalog backend. */ +/** The type of Apache Paimon catalog backend. */ public enum PaimonCatalogBackend { FILESYSTEM } diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalogOperations.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalogOperations.java index 367bfe713d9..8023bbedc16 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalogOperations.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonCatalogOperations.java @@ -64,7 +64,7 @@ /** * Implementation of {@link CatalogOperations} that represents operations for interacting with the - * Paimon catalog in Gravitino. + * Apache Paimon catalog in Apache Gravitino. */ public class PaimonCatalogOperations implements CatalogOperations, SupportsSchemas, TableCatalog { diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonSchema.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonSchema.java index 1c8b4576c55..6f02ff0ab9c 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonSchema.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonSchema.java @@ -27,8 +27,8 @@ import lombok.ToString; /** - * Implementation of {@link Schema} that represents a Paimon Schema (Database) entity in the Paimon - * schema. + * Implementation of {@link Schema} that represents an Apache Paimon Schema (Database) entity in the + * Paimon schema. 
*/ @ToString public class PaimonSchema extends BaseSchema { diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonSchemaPropertiesMetadata.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonSchemaPropertiesMetadata.java index efebf874ba0..fffe702f38a 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonSchemaPropertiesMetadata.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonSchemaPropertiesMetadata.java @@ -29,7 +29,8 @@ import java.util.Map; /** - * Implementation of {@link PropertiesMetadata} that represents Paimon schema properties metadata. + * Implementation of {@link PropertiesMetadata} that represents Apache Paimon schema properties + * metadata. */ public class PaimonSchemaPropertiesMetadata extends BasePropertiesMetadata { diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonTablePropertiesMetadata.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonTablePropertiesMetadata.java index c94a43358db..19f0eeaaf90 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonTablePropertiesMetadata.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/PaimonTablePropertiesMetadata.java @@ -29,7 +29,8 @@ import java.util.Map; /** - * Implementation of {@link PropertiesMetadata} that represents Paimon table properties metadata. + * Implementation of {@link PropertiesMetadata} that represents Apache Paimon table properties + * metadata. */ public class PaimonTablePropertiesMetadata extends BasePropertiesMetadata { diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/ops/PaimonCatalogOps.java b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/ops/PaimonCatalogOps.java index 586998cb9f1..cd0f7b9d46e 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/ops/PaimonCatalogOps.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/com/datastrato/gravitino/catalog/lakehouse/paimon/ops/PaimonCatalogOps.java @@ -32,7 +32,7 @@ import org.apache.paimon.schema.Schema; import org.apache.paimon.table.Table; -/** Table operation proxy that handles table operations of an underlying Paimon catalog. */ +/** Table operation proxy that handles table operations of an underlying Apache Paimon catalog. */ public class PaimonCatalogOps implements AutoCloseable { protected Catalog catalog; diff --git a/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoAdminClient.java b/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoAdminClient.java index 4bf8106d81f..e9c3d5b8dd2 100644 --- a/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoAdminClient.java +++ b/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoAdminClient.java @@ -58,8 +58,8 @@ import java.util.stream.Collectors; /** - * Gravitino Client for the administrator to interact with the Gravitino API, allowing the client to - * list, load, create, and alter Metalakes. 
+ * Apache Gravitino Client for the administrator to interact with the Gravitino API, allowing the + * client to list, load, create, and alter Metalakes. * *

Normal users should use {@link GravitinoClient} to connect with the Gravitino server. */ diff --git a/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoClient.java b/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoClient.java index f89f18572cb..19b11e90266 100644 --- a/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoClient.java +++ b/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoClient.java @@ -29,8 +29,8 @@ import java.util.Map; /** - * Gravitino Client for an user to interact with the Gravitino API, allowing the client to list, - * load, create, and alter Catalog. + * Apache Gravitino Client for a user to interact with the Gravitino API, allowing the client to + * list, load, create, and alter Catalog. * *

It uses an underlying {@link RESTClient} to send HTTP requests and receive responses from the * API. diff --git a/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoMetalake.java b/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoMetalake.java index dec10e6836b..b27b0b1800d 100644 --- a/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoMetalake.java +++ b/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoMetalake.java @@ -44,9 +44,9 @@ import org.apache.commons.lang3.StringUtils; /** - * Gravitino Metalake is the top-level metadata repository for users. It contains a list of catalogs - * as sub-level metadata collections. With {@link GravitinoMetalake}, users can list, create, load, - * alter and drop a catalog with specified identifier. + * Apache Gravitino Metalake is the top-level metadata repository for users. It contains a list of + * catalogs as sub-level metadata collections. With {@link GravitinoMetalake}, users can list, + * create, load, alter and drop a catalog with specified identifier. */ public class GravitinoMetalake extends MetalakeDTO implements SupportsCatalogs { private static final String API_METALAKES_CATALOGS_PATH = "api/metalakes/%s/catalogs/%s"; diff --git a/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoVersion.java b/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoVersion.java index 5a984bf8c95..f637a4fa619 100644 --- a/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoVersion.java +++ b/clients/client-java/src/main/java/com/datastrato/gravitino/client/GravitinoVersion.java @@ -24,7 +24,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -/** Gravitino version information. */ +/** Apache Gravitino version information. */ public class GravitinoVersion extends VersionDTO implements Comparable { private static final int VERSION_PART_NUMBER = 3; diff --git a/clients/filesystem-hadoop3/src/main/java/com/datastrato/gravitino/filesystem/hadoop/GravitinoVirtualFileSystem.java b/clients/filesystem-hadoop3/src/main/java/com/datastrato/gravitino/filesystem/hadoop/GravitinoVirtualFileSystem.java index f2c1ccb33e6..5de324f8a13 100644 --- a/clients/filesystem-hadoop3/src/main/java/com/datastrato/gravitino/filesystem/hadoop/GravitinoVirtualFileSystem.java +++ b/clients/filesystem-hadoop3/src/main/java/com/datastrato/gravitino/filesystem/hadoop/GravitinoVirtualFileSystem.java @@ -56,8 +56,8 @@ /** * {@link GravitinoVirtualFileSystem} is a virtual file system which users can access `fileset` and * other resources. It obtains the actual storage location corresponding to the resource from the - * Gravitino server, and creates an independent file system for it to act as an agent for users to - * access the underlying storage. + * Apache Gravitino server, and creates an independent file system for it to act as an agent for + * users to access the underlying storage. 
*/ public class GravitinoVirtualFileSystem extends FileSystem { private static final Logger Logger = LoggerFactory.getLogger(GravitinoVirtualFileSystem.class); diff --git a/core/src/main/java/com/datastrato/gravitino/Entity.java b/core/src/main/java/com/datastrato/gravitino/Entity.java index 932264650b8..9196ebd109c 100644 --- a/core/src/main/java/com/datastrato/gravitino/Entity.java +++ b/core/src/main/java/com/datastrato/gravitino/Entity.java @@ -24,7 +24,7 @@ import java.util.Map; import lombok.Getter; -/** This interface defines an entity within the Gravitino framework. */ +/** This interface defines an entity within the Apache Gravitino framework. */ public interface Entity extends Serializable { // The below constants are used for virtual metalakes, catalogs and schemas diff --git a/core/src/main/java/com/datastrato/gravitino/EntityAlreadyExistsException.java b/core/src/main/java/com/datastrato/gravitino/EntityAlreadyExistsException.java index a119d5ed558..75f73a329a7 100644 --- a/core/src/main/java/com/datastrato/gravitino/EntityAlreadyExistsException.java +++ b/core/src/main/java/com/datastrato/gravitino/EntityAlreadyExistsException.java @@ -24,7 +24,7 @@ /** * Exception class indicating that an entity already exists. This exception is thrown when an - * attempt is made to create an entity that already exists within the Gravitino framework. + * attempt is made to create an entity that already exists within the Apache Gravitino framework. */ public class EntityAlreadyExistsException extends GravitinoRuntimeException { diff --git a/core/src/main/java/com/datastrato/gravitino/EntitySerDeFactory.java b/core/src/main/java/com/datastrato/gravitino/EntitySerDeFactory.java index 409805b0e02..ac58f5bec17 100644 --- a/core/src/main/java/com/datastrato/gravitino/EntitySerDeFactory.java +++ b/core/src/main/java/com/datastrato/gravitino/EntitySerDeFactory.java @@ -27,7 +27,7 @@ /** * This class is responsible for creating instances of EntitySerDe implementations. EntitySerDe * (Entity Serialization/Deserialization) implementations are used to serialize and deserialize - * entities within the Gravitino framework. + * entities within the Apache Gravitino framework. */ public class EntitySerDeFactory { diff --git a/core/src/main/java/com/datastrato/gravitino/EntityStoreFactory.java b/core/src/main/java/com/datastrato/gravitino/EntityStoreFactory.java index 2625812dbf7..249a0a9e9e3 100644 --- a/core/src/main/java/com/datastrato/gravitino/EntityStoreFactory.java +++ b/core/src/main/java/com/datastrato/gravitino/EntityStoreFactory.java @@ -26,7 +26,7 @@ /** * This class is responsible for creating instances of EntityStore implementations. EntityStore - * implementations are used to store and manage entities within the Gravitino framework. + * implementations are used to store and manage entities within the Apache Gravitino framework. */ public class EntityStoreFactory { diff --git a/core/src/main/java/com/datastrato/gravitino/Field.java b/core/src/main/java/com/datastrato/gravitino/Field.java index b88ac7d0eb1..4b828436f26 100644 --- a/core/src/main/java/com/datastrato/gravitino/Field.java +++ b/core/src/main/java/com/datastrato/gravitino/Field.java @@ -20,7 +20,7 @@ import lombok.EqualsAndHashCode; -/** This class represents a field in the Gravitino framework. */ +/** This class represents a field in the Apache Gravitino framework. 
*/ @EqualsAndHashCode public class Field { diff --git a/core/src/main/java/com/datastrato/gravitino/GravitinoEnv.java b/core/src/main/java/com/datastrato/gravitino/GravitinoEnv.java index 8bdbd3354fa..7da552bd137 100644 --- a/core/src/main/java/com/datastrato/gravitino/GravitinoEnv.java +++ b/core/src/main/java/com/datastrato/gravitino/GravitinoEnv.java @@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory; /* - * This class manages the Gravitino environment. + * This class manages the Apache Gravitino environment. */ public class GravitinoEnv { diff --git a/core/src/main/java/com/datastrato/gravitino/authorization/AccessControlManager.java b/core/src/main/java/com/datastrato/gravitino/authorization/AccessControlManager.java index 231d38af876..eb7dbfb04ef 100644 --- a/core/src/main/java/com/datastrato/gravitino/authorization/AccessControlManager.java +++ b/core/src/main/java/com/datastrato/gravitino/authorization/AccessControlManager.java @@ -37,7 +37,7 @@ * AccessControlManager is used for manage users, roles, admin, grant information, this class is an * entrance class for tenant management. This lock policy about this is as follows: First, admin * operations are prevented by one lock. Then, other operations are prevented by the other lock. For - * non-admin operations, Gravitino doesn't choose metalake level lock. There are some reasons + * non-admin operations, Apache Gravitino doesn't choose metalake level lock. There are some reasons * mainly: First, the metalake can be renamed by users. It's hard to maintain a map with metalake as * the key. Second, the lock will be couped with life cycle of the metalake. */ diff --git a/core/src/main/java/com/datastrato/gravitino/connector/DataTypeConverter.java b/core/src/main/java/com/datastrato/gravitino/connector/DataTypeConverter.java index b55ed6d409f..1baf1536f3a 100644 --- a/core/src/main/java/com/datastrato/gravitino/connector/DataTypeConverter.java +++ b/core/src/main/java/com/datastrato/gravitino/connector/DataTypeConverter.java @@ -21,9 +21,9 @@ import com.datastrato.gravitino.rel.types.Type; /** - * The interface for converting data types between Gravitino and catalogs. In most cases, the ToType - * and FromType are the same. But in some cases, such as converting between Gravitino and JDBC - * types, the ToType is String and the FromType is JdbcTypeBean. + * The interface for converting data types between Apache Gravitino and catalogs. In most cases, the + * ToType and FromType are the same. But in some cases, such as converting between Gravitino and + * JDBC types, the ToType is String and the FromType is JdbcTypeBean. * * @param The Gravitino type will be converted to. * @param The type will be converted to Gravitino type. diff --git a/core/src/main/java/com/datastrato/gravitino/meta/SchemaEntity.java b/core/src/main/java/com/datastrato/gravitino/meta/SchemaEntity.java index 5c69b13dcd8..31ee4211498 100644 --- a/core/src/main/java/com/datastrato/gravitino/meta/SchemaEntity.java +++ b/core/src/main/java/com/datastrato/gravitino/meta/SchemaEntity.java @@ -29,7 +29,7 @@ import java.util.Map; import lombok.ToString; -/** A class representing a schema entity in Gravitino. */ +/** A class representing a schema entity in Apache Gravitino. 
*/ @ToString public class SchemaEntity implements Entity, Auditable, HasIdentifier { diff --git a/core/src/main/java/com/datastrato/gravitino/meta/TableEntity.java b/core/src/main/java/com/datastrato/gravitino/meta/TableEntity.java index 24cda485cbf..8d9b7942ac2 100644 --- a/core/src/main/java/com/datastrato/gravitino/meta/TableEntity.java +++ b/core/src/main/java/com/datastrato/gravitino/meta/TableEntity.java @@ -28,7 +28,7 @@ import java.util.Map; import lombok.ToString; -/** A class representing a table entity in Gravitino. */ +/** A class representing a table entity in Apache Gravitino. */ @ToString public class TableEntity implements Entity, Auditable, HasIdentifier { diff --git a/core/src/main/java/com/datastrato/gravitino/meta/TopicEntity.java b/core/src/main/java/com/datastrato/gravitino/meta/TopicEntity.java index 02780d9acd7..2adbcd99a37 100644 --- a/core/src/main/java/com/datastrato/gravitino/meta/TopicEntity.java +++ b/core/src/main/java/com/datastrato/gravitino/meta/TopicEntity.java @@ -29,7 +29,7 @@ import java.util.Objects; import lombok.ToString; -/** A class representing a topic metadata entity in Gravitino. */ +/** A class representing a topic metadata entity in Apache Gravitino. */ @ToString public class TopicEntity implements Entity, Auditable, HasIdentifier { public static final Field ID = diff --git a/core/src/main/java/com/datastrato/gravitino/meta/UserEntity.java b/core/src/main/java/com/datastrato/gravitino/meta/UserEntity.java index cff4694b863..3c50783a81f 100644 --- a/core/src/main/java/com/datastrato/gravitino/meta/UserEntity.java +++ b/core/src/main/java/com/datastrato/gravitino/meta/UserEntity.java @@ -31,7 +31,7 @@ import java.util.Objects; import lombok.ToString; -/** A class representing a user metadata entity in Gravitino. */ +/** A class representing a user metadata entity in Apache Gravitino. */ @ToString public class UserEntity implements User, Entity, Auditable, HasIdentifier { diff --git a/core/src/main/java/com/datastrato/gravitino/metalake/MetalakeManager.java b/core/src/main/java/com/datastrato/gravitino/metalake/MetalakeManager.java index f707a50cacd..e3b368581c6 100644 --- a/core/src/main/java/com/datastrato/gravitino/metalake/MetalakeManager.java +++ b/core/src/main/java/com/datastrato/gravitino/metalake/MetalakeManager.java @@ -41,7 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** Manages Metalakes within the Gravitino system. */ +/** Manages Metalakes within the Apache Gravitino system. */ public class MetalakeManager implements MetalakeDispatcher { private static final String METALAKE_DOES_NOT_EXIST_MSG = "Metalake %s does not exist"; diff --git a/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/H2ExceptionConverter.java b/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/H2ExceptionConverter.java index f43a5b6b23a..606beaf79d3 100644 --- a/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/H2ExceptionConverter.java +++ b/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/H2ExceptionConverter.java @@ -24,8 +24,8 @@ import java.sql.SQLException; /** - * Exception converter to Gravitino exception for H2. The definition of error codes can be found in - * the document: + * Exception converter to Apache Gravitino exception for H2. 
The definition of error codes can be + found in the document: */ public class H2ExceptionConverter implements SQLExceptionConverter { /** Indicates that a duplicated primary key or unique key entry was found in H2. */ diff --git a/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/MySQLExceptionConverter.java b/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/MySQLExceptionConverter.java index 320f55b1907..06d14d89142 100644 --- a/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/MySQLExceptionConverter.java +++ b/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/MySQLExceptionConverter.java @@ -24,8 +24,8 @@ import java.sql.SQLException; /** - * Exception converter to Gravitino exception for MySQL. The definition of error codes can be found - * in the document: */ public class MySQLExceptionConverter implements SQLExceptionConverter { diff --git a/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/SQLExceptionConverter.java b/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/SQLExceptionConverter.java index 081ea5b8dee..a149bd70d59 100644 --- a/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/SQLExceptionConverter.java +++ b/core/src/main/java/com/datastrato/gravitino/storage/relational/converters/SQLExceptionConverter.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.sql.SQLException; -/** Interface for converter JDBC SQL exceptions to Gravitino exceptions. */ +/** Interface for converting JDBC SQL exceptions to Apache Gravitino exceptions. */ public interface SQLExceptionConverter { /** * Convert a JDBC exception to a GravitinoException. diff --git a/dev/docker/tools/README.md b/dev/docker/tools/README.md index 700cb9ddff6..61024733b7d 100644 --- a/dev/docker/tools/README.md +++ b/dev/docker/tools/README.md @@ -22,4 +22,4 @@ Because Docker Desktop for Mac does not provide access to container IP from host This can result in the host (macOS) and containers not being able to access each other's internal services directly over IPs. The [mac-docker-connector](https://github.com/wenjunxiao/mac-docker-connector) provides the ability for the macOS host to directly access the Docker container IP. Before running the integration tests, make sure to execute the `dev/docker/tools/mac-docker-connector.sh` script. -> Developing Gravitino in a linux environment does not have this limitation and does not require executing the `mac-docker-connector.sh` script ahead of time. +> Developing Apache Gravitino in a Linux environment does not have this limitation and does not require executing the `mac-docker-connector.sh` script ahead of time. diff --git a/docs/apache-hive-catalog.md b/docs/apache-hive-catalog.md index f86c419a264..d0287a26fa9 100644 --- a/docs/apache-hive-catalog.md +++ b/docs/apache-hive-catalog.md @@ -8,7 +8,7 @@ license: "This software is licensed under the Apache License version 2." ## Introduction -Gravitino offers the capability to utilize [Apache Hive](https://hive.apache.org) as a catalog for metadata management. +Apache Gravitino offers the capability to utilize [Apache Hive](https://hive.apache.org) as a catalog for metadata management.
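The introduction above describes using Hive as a catalog; below is a minimal, hedged sketch of creating such a catalog over Gravitino's REST API. The metalake name, catalog name, and `metastore.uris` value are placeholders, and the request shape follows the REST conventions used elsewhere in these docs:

```shell
# Sketch: create a Hive catalog via the REST API (all names are illustrative)
curl -X POST -H "Accept: application/vnd.gravitino.v1+json" \
  -H "Content-Type: application/json" -d '{
    "name": "hive_catalog",
    "type": "RELATIONAL",
    "provider": "hive",
    "comment": "Hive catalog example",
    "properties": {"metastore.uris": "thrift://hive-host:9083"}
  }' http://localhost:8090/api/metalakes/metalake_demo/catalogs
```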
### Requirements and limitations diff --git a/docs/docker-image-details.md b/docs/docker-image-details.md index 61a96670cac..cfa0a81162f 100644 --- a/docs/docker-image-details.md +++ b/docs/docker-image-details.md @@ -7,9 +7,9 @@ license: "This software is licensed under the Apache License version 2." # User Docker images -There are two kinds of Docker images you can use: the Gravitino Docker image and playground Docker images. +There are two kinds of Docker images you can use: the Apache Gravitino Docker image and playground Docker images. -## Gravitino Docker image +## Apache Gravitino Docker image You can deploy the service with the Gravitino Docker image. @@ -52,7 +52,7 @@ The playground consists of multiple Docker images. The Docker images of the playground have suitable configurations for users to experience. -### Hive image +### Apache Hive image Changelog @@ -91,7 +91,7 @@ Changelog You can use these kinds of Docker images to facilitate integration testing of all catalog and connector modules within Gravitino. -## Gravitino CI Apache Hive image with kerberos enabled +## Apache Gravitino CI Apache Hive image with Kerberos enabled You can use this kind of image to test the catalog of Apache Hive with Kerberos enabled. @@ -112,7 +112,7 @@ Changelog - Set up a Hive cluster with Kerberos enabled. - Install a KDC server and create a principal for Hive. For more details, please see [kerberos-hive](../dev/docker/kerberos-hive) -## Gravitino CI Apache Hive image +## Apache Gravitino CI Apache Hive image You can use this kind of image to test the catalog of Apache Hive. @@ -183,7 +183,7 @@ Changelog - `10000` HiveServer2 - `10002` HiveServer2 HTTP -## Gravitino CI Trino image +## Apache Gravitino CI Trino image You can use this image to test Trino. @@ -210,7 +210,7 @@ Changelog - Expose ports: - `8080` Trino JDBC port -## Gravitino CI Doris image +## Apache Gravitino CI Doris image You can use this image to test Apache Doris. @@ -238,7 +238,7 @@ Changelog - `8030` Doris FE HTTP port - `9030` Doris FE MySQL server port -## Gravitino CI Apache Ranger image +## Apache Gravitino CI Apache Ranger image You can use this image to control Trino's permissions. diff --git a/docs/expression.md b/docs/expression.md index 043fd22a26f..ddc6f3fd711 100644 --- a/docs/expression.md +++ b/docs/expression.md @@ -1,5 +1,5 @@ --- -title: "Expression system of Gravitino" +title: "Expression system of Apache Gravitino" slug: /expression date: 2024-02-02 keyword: expression function field literal reference @@ -9,7 +9,7 @@ license: This software is licensed under the Apache License version 2. import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -This page introduces the expression system of Gravitino. Expressions are vital component of metadata definition, through expressions, you can define [default values](./manage-relational-metadata-using-gravitino.md#table-column-default-value) for columns, function arguments for [function partitioning](./table-partitioning-bucketing-sort-order-indexes.md#table-partitioning), [bucketing](./table-partitioning-bucketing-sort-order-indexes.md#table-bucketing), and sort term of [sort ordering](./table-partitioning-bucketing-sort-order-indexes.md#sort-ordering) in tables. +This page introduces the expression system of Apache Gravitino.
Expressions are a vital component of metadata definitions; through expressions, you can define [default values](./manage-relational-metadata-using-gravitino.md#table-column-default-value) for columns, function arguments for [function partitioning](./table-partitioning-bucketing-sort-order-indexes.md#table-partitioning), [bucketing](./table-partitioning-bucketing-sort-order-indexes.md#table-bucketing), and the sort term of [sort ordering](./table-partitioning-bucketing-sort-order-indexes.md#sort-ordering) in tables. The Gravitino expression system divides expressions into three basic parts: field reference, literal, and function. Function expressions can contain field references, literals, and other function expressions. ## Field reference diff --git a/docs/getting-started.md b/docs/getting-started.md index e9eb57c3a0d..586962baf0e 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -1,10 +1,10 @@ --- -title: "Getting started with Gravitino" +title: "Getting started with Apache Gravitino" slug: /getting-started license: "This software is licensed under the Apache License version 2." --- -There are several options for getting started with Gravitino. Installing and configuring Hive and Trino can be a little complex, so if you are unfamiliar with the technologies it would be best to use Docker. +There are several options for getting started with Apache Gravitino. Installing and configuring Hive and Trino can be a little complex, so if you are unfamiliar with these technologies it would be best to use Docker. If you want to download and install Gravitino: @@ -12,7 +12,7 @@ If you want to download and install Gravitino: - Google Cloud Platform, see [Getting started on Google Cloud Platform](#getting-started-on-google-cloud-platform) - locally, see [Getting started locally](#getting-started-locally) -If you have your own Gravitino setup and want to use Apache Hive: +If you have your own Apache Gravitino setup and want to use Apache Hive: - on AWS or Google Cloud Platform, see [Installing Apache Hive on AWS or Google Cloud Platform](#installing-apache-hive-on-aws-or-google-cloud-platform) - locally, see [Installing Apache Hive locally](#installing-apache-hive-locally) @@ -208,7 +208,7 @@ sudo docker start gravitino-container The same steps for installing Hive on AWS or Google Cloud Platform apply when installing it locally. Follow [Installing Apache Hive on AWS or Google Cloud Platform](#installing-apache-hive-on-aws-or-google-cloud-platform). -## Installing Gravitino playground on AWS or Google Cloud Platform +## Installing Apache Gravitino playground on AWS or Google Cloud Platform Gravitino provides a bundle of Docker images to launch a Gravitino playground, which includes Apache Hive, Apache Hadoop, Trino, MySQL, PostgreSQL, and Gravitino. You can use @@ -226,11 +226,11 @@ You can install and run all the programs as Docker containers by using the [gravitino-playground](https://github.com/datastrato/gravitino-playground). For details about how to run the playground, see [how-to-use-the-playground](./how-to-use-the-playground.md) -## Installing Gravitino playground locally +## Installing Apache Gravitino playground locally The same steps for installing the playground on AWS or Google Cloud Platform apply when installing it locally. Follow [Installing Apache Gravitino playground on AWS or Google Cloud Platform](#installing-apache-gravitino-playground-on-aws-or-google-cloud-platform).
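For the playground installs just described, a minimal sketch of the fetch-and-launch steps, reusing the repository and the `launch-playground.sh` script that the how-to-use-the-playground hunks later in this patch refer to:

```shell
# Sketch: fetch and launch the Gravitino playground
git clone https://github.com/datastrato/gravitino-playground.git
cd gravitino-playground
./launch-playground.sh
```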
-## Using REST to interact with Gravitino +## Using REST to interact with Apache Gravitino After starting the Gravitino distribution, issue REST commands to create and modify metadata. While you are using `localhost` in these examples, run these commands remotely via a hostname or IP address once you establish correct access. @@ -291,7 +291,7 @@ After starting the Gravitino distribution, issue REST commands to create and mod Note that the metastore.uris property is used for the Hive catalog and needs updating if you change your configuration. -## Accessing Gravitino on AWS externally +## Accessing Apache Gravitino on AWS externally When you deploy Gravitino on AWS, accessing it externally requires some additional configuration due to how AWS networking works. diff --git a/docs/glossary.md b/docs/glossary.md index 587974264c7..82138ff044e 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -1,5 +1,5 @@ --- -title: "Gravitino Glossary" +title: "Apache Gravitino Glossary" date: 2023-11-28 license: "This software is licensed under the Apache License version 2." --- @@ -144,11 +144,11 @@ license: "This software is licensed under the Apache License version 2." - A Gradle wrapper script, used for executing Gradle commands without installing Gradle separately. -## Gravitino +## Apache Gravitino - An open-source software platform created by Datastrato for high-performance, geo-distributed, and federated metadata lakes. Designed to manage metadata directly in different sources, types, and regions, providing unified metadata access for data and AI assets. -## Gravitino configuration file (gravitino.conf) +## Apache Gravitino configuration file (gravitino.conf) - The configuration file for the Gravitino server, located in the `conf` directory. It follows the standard property file format and contains settings for the Gravitino server. @@ -168,15 +168,15 @@ license: "This software is licensed under the Apache License version 2." - The port number on which a server listens for incoming connections. -## Iceberg Hive catalog +## Apache Iceberg Hive catalog - The **Iceberg Hive catalog** is a specialized metadata service designed for the Apache Iceberg table format, allowing external systems to interact with Iceberg metadata via a Hive metastore thrift client. -## Iceberg REST catalog +## Apache Iceberg REST catalog - The **Iceberg REST Catalog** is a specialized metadata service designed for the Apache Iceberg table format, allowing external systems to interact with Iceberg metadata via a RESTful API. -## Iceberg JDBC catalog +## Apache Iceberg JDBC catalog - The **Iceberg JDBC Catalog** is a specialized metadata service designed for the Apache Iceberg table format, allowing external systems to interact with Iceberg metadata using JDBC (Java Database Connectivity). @@ -364,7 +364,7 @@ license: "This software is licensed under the Apache License version 2." - A connector module for integrating Gravitino with Trino. -## Trino Gravitino connector documentation +## Trino Apache Gravitino connector documentation - Documentation providing information on using the Trino connector to access metadata in Gravitino. 
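Picking up the "Using REST to interact with Apache Gravitino" section above, a hedged illustration of a first REST command — creating a metalake; the metalake name and comment are placeholders:

```shell
# Sketch: create a metalake over REST (names are illustrative)
curl -X POST -H "Accept: application/vnd.gravitino.v1+json" \
  -H "Content-Type: application/json" \
  -d '{"name": "metalake_demo", "comment": "first metalake", "properties": {}}' \
  http://localhost:8090/api/metalakes
```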
diff --git a/docs/gravitino-server-config.md b/docs/gravitino-server-config.md index 33ba3fff2b8..769f99958a5 100644 --- a/docs/gravitino-server-config.md +++ b/docs/gravitino-server-config.md @@ -1,5 +1,5 @@ --- -title: Gravitino configuration +title: Apache Gravitino configuration slug: /gravitino-server-config keywords: - configuration @@ -8,20 +8,20 @@ license: "This software is licensed under the Apache License version 2." ## Introduction -Gravitino supports several configurations: +Apache Gravitino supports several configurations: 1. **Gravitino server configuration**: Used to start up the Gravitino server. 2. **Gravitino catalog properties configuration**: Used to make default values for different catalogs. 3. **Some other configurations**: Includes HDFS and other configurations. -## Gravitino server configurations +## Apache Gravitino server configurations You can customize the Gravitino server by editing the configuration file `gravitino.conf` in the `conf` directory. The default values are sufficient for most use cases. We strongly recommend that you read the following sections to understand the configuration file so you can change the default values to suit your specific situation and usage scenario. The `gravitino.conf` file lists the configuration items in the following table. It groups those items into the following categories: -### Gravitino HTTP Server configuration +### Apache Gravitino HTTP Server configuration | Configuration item | Description | Default value | Required | Since version | |-------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|----------|---------------| @@ -138,7 +138,7 @@ Refer to [security](security.md) for HTTPS and authentication configurations. |-------------------------------------------|------------------------------------------------------|---------------|----------|---------------| | `gravitino.metrics.timeSlidingWindowSecs` | The seconds of Gravitino metrics time sliding window | 60 | No | 0.5.1 | -## Gravitino catalog properties configuration +## Apache Gravitino catalog properties configuration There are three types of catalog properties: diff --git a/docs/hadoop-catalog.md b/docs/hadoop-catalog.md index 692e7b7edbf..0cf404ee3cc 100644 --- a/docs/hadoop-catalog.md +++ b/docs/hadoop-catalog.md @@ -12,7 +12,7 @@ Hadoop catalog is a fileset catalog that using Hadoop Compatible File System (HC the storage location of the fileset. Currently, it supports local filesystem and HDFS. For object storage like S3, GCS, and Azure Blob Storage, you can put the hadoop object store jar like hadoop-aws into the `$GRAVITINO_HOME/catalogs/hadoop/libs` directory to enable the support. -Gravitino itself hasn't yet tested the object storage support, so if you have any issue, +Apache Gravitino itself hasn't yet tested the object storage support, so if you have any issue, please create an [issue](https://github.com/datastrato/gravitino/issues). Note that Gravitino uses Hadoop 3 dependencies to build Hadoop catalog. 
Theoretically, it should be diff --git a/docs/how-to-build.md b/docs/how-to-build.md index 84149d18479..172bd1d287e 100644 --- a/docs/how-to-build.md +++ b/docs/how-to-build.md @@ -1,12 +1,12 @@ --- -title: How to build Gravitino +title: How to build Apache Gravitino slug: /how-to-build license: "This software is licensed under the Apache License version 2." --- - [Prerequisites](#prerequisites) - [Quick start](#quick-start) -- [How to Build Gravitino on Windows (Using WSL)](#how-to-build-gravitino-on-windows-using-wsl) +- [How to Build Apache Gravitino on Windows (Using WSL)](#how-to-build-apache-gravitino-on-windows-using-wsl) ## Prerequisites @@ -165,7 +165,7 @@ If you want to contribute to this open-source project, please fork the project o `gravitino-trino-connector-{version}.tar.gz.sha256` under the `distribution` directory. You can uncompress and deploy it to Trino to use the Gravitino Trino connector. -## How to Build Gravitino on Windows (Using WSL) +## How to Build Apache Gravitino on Windows (Using WSL) ### Download WSL (Ubuntu) @@ -243,7 +243,7 @@ python3.11 --version These commands add a repository that provides the latest Python versions and then installs Python 3.11. -### Download Gravitino Project to WSL +### Download Apache Gravitino Project to WSL **On Ubuntu (WSL):** diff --git a/docs/how-to-install.md b/docs/how-to-install.md index 84749702e0f..29994d6ecbd 100644 --- a/docs/how-to-install.md +++ b/docs/how-to-install.md @@ -1,18 +1,18 @@ --- -title: How to install Gravitino +title: How to install Apache Gravitino slug: /how-to-install license: "This software is licensed under the Apache License version 2." --- -## Install Gravitino from scratch +## Install Apache Gravitino from scratch :::note -Gravitino supports running on Java 8, 11, and 17. Make sure you have Java installed and +Apache Gravitino supports running on Java 8, 11, and 17. Make sure you have Java installed and `JAVA_HOME` configured correctly. To confirm the Java version, run the `${JAVA_HOME}/bin/java -version` command. ::: -### Get the Gravitino binary distribution package +### Get the Apache Gravitino binary distribution package Before installing Gravitino, make sure you have the Gravitino binary distribution package. You can download the latest Gravitino binary distribution package from [GitHub](https://github.com/datastrato/gravitino/releases), @@ -51,24 +51,24 @@ The Gravitino binary distribution package contains the following files: If you want to use the relational backend storage, you need to initialize the RDBMS first. For the details on how to initialize the RDBMS, please check [How to use relational backend storage](./how-to-use-relational-backend-storage.md). -#### Configure the Gravitino server +#### Configure the Apache Gravitino server The Gravitino server configuration file is `conf/gravitino.conf`. You can configure the Gravitino server by modifying this file. Basic configurations are already added to this file. All the configurations are listed in [Gravitino Server Configurations](./gravitino-server-config.md). -#### Configure the Gravitino server log +#### Configure the Apache Gravitino server log The Gravitino server log configuration file is `conf/log4j2.properties`. Gravitino uses Log4j2 as the logging system. You can refer to [Log4j2](https://logging.apache.org/log4j/2.x/) for the log configuration. -#### Configure the Gravitino server environment +#### Configure the Apache Gravitino server environment The Gravitino server environment configuration file is `conf/gravitino-env.sh`.
Gravitino exposes several environment variables. You can modify them in this file. -#### Configure Gravitino catalogs +#### Configure Apache Gravitino catalogs Gravitino supports multiple catalogs. You can configure the catalog-level configurations by modifying the related configuration file in the `catalogs//conf` directory. The @@ -96,7 +96,7 @@ Also, Gravitino supports loading catalog specific configurations from external f you can put your own `hive-site.xml` file in the `catalogs/hive/conf` directory, and Gravitino loads it automatically. -#### Start Gravitino server +#### Start Apache Gravitino server After configuring the Gravitino server, start the Gravitino server on daemon by running: @@ -125,9 +125,9 @@ variable in the `conf/gravitino-env.sh` file. Then create a `Remote JVM Debug` configuration in `IntelliJ IDEA` and debug `gravitino.server.main`. ::: -## Install Gravitino using Docker +## Install Apache Gravitino using Docker -### Get the Gravitino Docker image +### Get the Apache Gravitino Docker image Gravitino publishes the Docker image to [Docker Hub](https://hub.docker.com/r/datastrato/gravitino/tags). Run the Gravitino Docker image by running: @@ -145,7 +145,7 @@ curl -v -X GET -H "Accept: application/vnd.gravitino.v1+json" -H "Content-Type: to make sure Gravitino is running. -## Install Gravitino using Docker compose +## Install Apache Gravitino using Docker compose The published Gravitino Docker image only contains the Gravitino server with basic configurations. If you want to experience the whole Gravitino system with other components, use the Docker diff --git a/docs/how-to-sign-releases.md b/docs/how-to-sign-releases.md index 3f34b32df7a..2f7051b4763 100644 --- a/docs/how-to-sign-releases.md +++ b/docs/how-to-sign-releases.md @@ -4,7 +4,7 @@ slug: /how-to-sign-releases license: "This software is licensed under the Apache License version 2." --- -These instructions provide a guide to signing and verifying Gravitino releases to enhance the security of releases. A signed release enables people to confirm the author of the release and guarantees that the code hasn't been altered. +These instructions provide a guide to signing and verifying Apache Gravitino releases to enhance the security of releases. A signed release enables people to confirm the author of the release and guarantees that the code hasn't been altered. ## Prerequisites diff --git a/docs/how-to-test.md b/docs/how-to-test.md index 558dddad3e5..60cf0244979 100644 --- a/docs/how-to-test.md +++ b/docs/how-to-test.md @@ -1,10 +1,10 @@ --- -title: How to test Gravitino +title: How to test Apache Gravitino slug: /how-to-test license: "This software is licensed under the Apache License version 2." --- -Gravitino has two types of tests: +Apache Gravitino has two types of tests: - Unit tests, focus on the functionalities of the specific class, module, or component. - Integration tests, end-to-end tests that cover the whole system. @@ -54,7 +54,7 @@ Gravitino has two modes to run the integration tests, the default `embedded` mod Running the `./gradlew build` command triggers the build and runs the integration tests in embedded mode. ::: -### Deploy the Gravitino server and run the integration tests in deploy mode +### Deploy the Apache Gravitino server and run the integration tests in deploy mode To deploy the Gravitino server locally to run the integration tests, follow these steps: @@ -125,13 +125,13 @@ Using Gravitino IT Docker container to run all integration tests. 
[deploy test] Complete integration tests only run when all the required environments are met. Otherwise, only parts of them without the `gravitino-docker-test` tag run. -## How to debug Gravitino server and integration tests in embedded mode +## How to debug Apache Gravitino server and integration tests in embedded mode By default, the integration tests run in the embedded mode, in which `MiniGravitino` starts in the same process. Debugging `MiniGravitino` is simple and easy: you can modify any code in the Gravitino project and set breakpoints anywhere. -## How to debug Gravitino server and integration tests in deploy mode +## How to debug Apache Gravitino server and integration tests in deploy mode This mode is closer to the actual environment, but more complex to debug. To debug the Gravitino server code, follow these steps: diff --git a/docs/how-to-use-gvfs.md b/docs/how-to-use-gvfs.md index ba62a49bf58..274834b27fd 100644 --- a/docs/how-to-use-gvfs.md +++ b/docs/how-to-use-gvfs.md @@ -1,12 +1,12 @@ --- -title: How to use Gravitino Virtual File System with Filesets +title: How to use Apache Gravitino Virtual File System with Filesets slug: /how-to-use-gvfs license: "This software is licensed under the Apache License version 2." --- ## Introduction -`Fileset` is a concept brought in by Gravitino, which is a logical collection of files and +`Fileset` is a concept brought in by Apache Gravitino; it is a logical collection of files and directories. With a `fileset`, you can manage non-tabular data through Gravitino. For details, you can read [How to manage fileset metadata using Gravitino](./manage-fileset-metadata-using-gravitino.md). @@ -94,7 +94,7 @@ You can configure these properties in two ways: ``` -## How to use the Gravitino Virtual File System +## How to use the Apache Gravitino Virtual File System First, make sure to obtain the Gravitino Virtual File System runtime jar, which you can get in two ways: diff --git a/docs/how-to-use-python-client.md b/docs/how-to-use-python-client.md index 8928ee1703f..d33cd8de990 100644 --- a/docs/how-to-use-python-client.md +++ b/docs/how-to-use-python-client.md @@ -1,5 +1,5 @@ --- -title: "How to use Gravitino Python client" +title: "How to use Apache Gravitino Python client" slug: /how-to-use-gravitino-python-client date: 2024-05-09 keyword: Gravitino Python client license: This software is licensed under the Apache License version 2. --- ## Introduction -Gravitino is a high-performance, geo-distributed, and federated metadata lake. +Apache Gravitino is a high-performance, geo-distributed, and federated metadata lake. It manages the metadata directly in different sources, types, and regions, and also provides users with unified metadata access for data and AI assets. @@ -23,7 +23,7 @@ First of all, You must have a Gravitino server set up and run, You can refer doc [How to install Gravitino](./how-to-install.md) to build the Gravitino server from source code and install it locally. -### Gravitino Python client API +### Apache Gravitino Python client API ```shell pip install gravitino ``` 1. [Manage metalake using Gravitino Python API](./manage-metalake-using-gravitino.md?language=python) 2.
[Manage fileset metadata using Gravitino Python API](./manage-fileset-metadata-using-gravitino.md?language=python) -### Gravitino Fileset Example +### Apache Gravitino Fileset Example We offer a playground environment to help you quickly understand how to use the Gravitino Python client to manage non-tabular data on HDFS via Fileset in Gravitino. You can refer to the @@ -63,7 +63,7 @@ contains the following code snippets: 11. Drop this `Fileset.Type.EXTERNAL` type fileset and check that the fileset location was not deleted in HDFS. -## How to development Gravitino Python Client +## How to develop the Apache Gravitino Python Client You can use any IDE to develop the Gravitino Python client. Directly open the client-python module project in the IDE. diff --git a/docs/how-to-use-relational-backend-storage.md b/docs/how-to-use-relational-backend-storage.md index 5e8e7f5988c..0ccb57f6b87 100644 --- a/docs/how-to-use-relational-backend-storage.md +++ b/docs/how-to-use-relational-backend-storage.md @@ -6,7 +6,7 @@ license: "This software is licensed under the Apache License version 2." ## Introduction -Before the version `0.5.0`, Gravitino only supports KV backend storage to store metadata. Since +Before version `0.5.0`, Apache Gravitino only supported KV backend storage to store metadata. Since RDBMS is widely used in the industry, starting from version `0.5.0`, Gravitino supports using RDBMS as relational backend storage to store metadata. This doc will guide you on how to use the relational backend storage in Gravitino. @@ -61,7 +61,7 @@ Then please place it in the distribution package directory: ${GRAVITINO_HOME}/libs/ ``` -### Step 4: Set up the Gravitino server configs +### Step 4: Set up the Apache Gravitino server configs Find the server configuration file named `gravitino.conf` in the distribution package directory: diff --git a/docs/how-to-use-the-playground.md b/docs/how-to-use-the-playground.md index 3fa597cb117..a5be2677c5c 100644 --- a/docs/how-to-use-the-playground.md +++ b/docs/how-to-use-the-playground.md @@ -7,7 +7,7 @@ license: "This software is licensed under the Apache License version 2." ## Playground introduction -The playground is a complete Gravitino Docker runtime environment with `Hive`, `HDFS`, `Trino`, `MySQL`, `PostgreSQL`, `Jupyter`, and a `Gravitino` server. +The playground is a complete Apache Gravitino Docker runtime environment with `Hive`, `HDFS`, `Trino`, `MySQL`, `PostgreSQL`, `Jupyter`, and a `Gravitino` server. Depending on your network and computer, startup time may take 3-5 minutes. Once the playground environment has started, you can open [http://localhost:8090](http://localhost:8090) in a browser to access the Gravitino Web UI. @@ -66,7 +66,7 @@ cd gravitino-playground ./launch-playground.sh hive|gravitino|trino|postgresql|mysql|spark|jupyter ``` -### Experiencing Gravitino Fileset with Jupyter +### Experiencing Apache Gravitino Fileset with Jupyter We provide a Fileset playground environment to help you quickly understand how to use the Gravitino Python client to manage non-tabular data on HDFS via fileset in the Gravitino service. @@ -97,7 +97,7 @@ contains the following code snippets: 11. Drop this `Fileset.Type.EXTERNAL` type fileset and check that the fileset location was not deleted in HDFS. -## Experiencing Gravitino with Trino SQL +## Experiencing Apache Gravitino with Trino SQL 1.
Log in to the Gravitino playground Trino Docker container using the following command: @@ -184,7 +184,7 @@ WHERE e.employee_id = p.employee_id AND p.employee_id = s.employee_id GROUP BY e.employee_id, given_name, family_name; ``` -### Using Iceberg REST service +### Using Apache Iceberg REST service If you want to migrate your business from Hive to Iceberg, some tables will use Hive, and the other tables will use Iceberg. Gravitino provides an Iceberg REST catalog service, too. You can use Spark to access the REST catalog to write the table data. diff --git a/docs/iceberg-rest-service.md b/docs/iceberg-rest-service.md index fbc4e887602..b68b03203a9 100644 --- a/docs/iceberg-rest-service.md +++ b/docs/iceberg-rest-service.md @@ -8,7 +8,7 @@ license: "This software is licensed under the Apache License version 2." ## Background -The Gravitino Iceberg REST Server follows the [Apache Iceberg REST API specification](https://github.com/apache/iceberg/blob/main/open-api/rest-catalog-open-api.yaml) and acts as an Iceberg REST catalog server. +The Apache Gravitino Iceberg REST Server follows the [Apache Iceberg REST API specification](https://github.com/apache/iceberg/blob/main/open-api/rest-catalog-open-api.yaml) and acts as an Iceberg REST catalog server. ### Capabilities @@ -23,7 +23,7 @@ Builds with Apache Iceberg `1.3.1`. The Apache Iceberg table format version is ` Builds with Hadoop 2.10.x. There may be compatibility issues when accessing Hadoop 3.x clusters. ::: -## Gravitino Iceberg REST catalog service configuration +## Apache Gravitino Iceberg REST catalog service configuration Assuming the Gravitino server is deployed in the `GRAVITINO_HOME` directory, you can locate the configuration options in [`$GRAVITINO_HOME/conf/gravitino.conf`](gravitino-server-config.md). There are four configuration properties for the Iceberg REST catalog service: @@ -58,7 +58,7 @@ Please refer to the following sections for details. The filter in `customFilters` should be a standard javax servlet filter. You can also specify filter parameters by setting configuration entries in the style `gravitino.auxService.iceberg-rest..param.=`. -### Iceberg metrics store configuration +### Apache Iceberg metrics store configuration Gravitino provides a pluggable metrics store interface to store and delete Iceberg metrics. You can develop a class that implements `com.datastrato.gravitino.catalog.lakehouse.iceberg.web.metrics` and add the corresponding jar file to the Iceberg REST service classpath directory. @@ -70,14 +70,14 @@ Gravitino provides a pluggable metrics store interface to store and delete Icebe | `gravitino.auxService.iceberg-rest.metricsQueueCapacity` | The size of the queue used to store metrics temporarily before writing them to the persistent storage. Metrics will be dropped when the queue is full. | 1000 | No | 0.4.0 | -### Gravitino Iceberg catalog backend configuration +### Apache Gravitino Iceberg catalog backend configuration :::info The Gravitino Iceberg REST catalog service uses the memory catalog backend by default. You can specify a Hive or JDBC catalog backend for a production environment.
::: -#### Hive backend configuration +#### Apache Hive backend configuration | Configuration item | Description | Default value | Required | Since Version | |----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------|----------|---------------| @@ -123,7 +123,7 @@ The `clients` property for example: The Gravitino Iceberg REST catalog service adds the HDFS configuration files `core-site.xml` and `hdfs-site.xml` from the directory defined by `gravitino.auxService.iceberg-rest.classpath`, for example, `catalogs/lakehouse-iceberg/conf`, to the classpath. -## Starting the Gravitino Iceberg REST catalog service +## Starting the Apache Gravitino Iceberg REST catalog service To start the service: @@ -139,7 +139,7 @@ curl http://127.0.0.1:9001/iceberg/v1/config Normally you will see output like `{"defaults":{},"overrides":{}}%`. -## Exploring the Gravitino and Apache Iceberg REST catalog service with Apache Spark +## Exploring the Apache Gravitino and Apache Iceberg REST catalog service with Apache Spark ### Deploying Apache Spark with Apache Iceberg support diff --git a/docs/index.md b/docs/index.md index 79cb61e5593..7a006afe9c1 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,12 +1,12 @@ --- -title: Gravitino overview +title: Apache Gravitino overview slug: / license: "This software is licensed under the Apache License version 2." --- -## What's Gravitino? +## What's Apache Gravitino? -Gravitino is a high-performance, geo-distributed, and federated metadata lake. It manages the +Apache Gravitino is a high-performance, geo-distributed, and federated metadata lake. It manages the metadata directly in different sources, types, and regions. It also provides users with unified metadata access for data and AI assets. @@ -43,7 +43,7 @@ To get started with Gravitino, see [Getting started](./getting-started.md) for t * [Running on Google Cloud Platform](./getting-started.md#getting-started-on-google-cloud-platform): a quick guide to starting and using Gravitino on GCP. -## How to use Gravitino +## How to use Apache Gravitino Gravitino provides two SDKs to manage metadata from different catalogs in a unified way: the REST API and the Java SDK. You can use either to manage metadata. See @@ -84,7 +84,7 @@ Gravitino currently supports the following catalogs: Gravitino also provides an Iceberg REST catalog service for the Iceberg table format. See the [Iceberg REST catalog service](./iceberg-rest-service.md) for details. -## Gravitino playground +## Apache Gravitino playground To experience Gravitino with other components easily, Gravitino provides a runnable playground. It integrates Apache Hadoop, Apache Hive, Trino, MySQL, PostgreSQL, and Gravitino together as a diff --git a/docs/jdbc-doris-catalog.md b/docs/jdbc-doris-catalog.md index b6cd61e5c35..a3c6abd689e 100644 --- a/docs/jdbc-doris-catalog.md +++ b/docs/jdbc-doris-catalog.md @@ -13,7 +13,7 @@ import TabItem from '@theme/TabItem'; ## Introduction -Gravitino provides the ability to manage [Apache Doris](https://doris.apache.org/) metadata through JDBC connection.. +Apache Gravitino provides the ability to manage [Apache Doris](https://doris.apache.org/) metadata through a JDBC connection.
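To make the Doris introduction above concrete, here is a hedged sketch of creating a JDBC-backed Doris catalog over REST; the provider string, property keys, and connection values are assumptions based on the JDBC catalog docs in this patch:

```shell
# Sketch: create a Doris catalog via JDBC (provider and keys are assumptions)
curl -X POST -H "Accept: application/vnd.gravitino.v1+json" \
  -H "Content-Type: application/json" -d '{
    "name": "doris_catalog",
    "type": "RELATIONAL",
    "provider": "jdbc-doris",
    "comment": "Doris example",
    "properties": {
      "jdbc-url": "jdbc:mysql://doris-fe-host:9030",
      "jdbc-driver": "com.mysql.jdbc.Driver",
      "jdbc-user": "admin",
      "jdbc-password": "password"
    }
  }' http://localhost:8090/api/metalakes/metalake_demo/catalogs
```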
:::caution Gravitino saves some system information in schema and table comments, like diff --git a/docs/jdbc-mysql-catalog.md b/docs/jdbc-mysql-catalog.md index c168fd630a9..f437f0c9fee 100644 --- a/docs/jdbc-mysql-catalog.md +++ b/docs/jdbc-mysql-catalog.md @@ -13,7 +13,7 @@ import TabItem from '@theme/TabItem'; ## Introduction -Gravitino provides the ability to manage MySQL metadata. +Apache Gravitino provides the ability to manage MySQL metadata. :::caution Gravitino saves some system information in schema and table comments, like `(From Gravitino, DO NOT EDIT: gravitino.v1.uid1078334182909406185)`. Please don't change or remove this message. diff --git a/docs/jdbc-postgresql-catalog.md b/docs/jdbc-postgresql-catalog.md index 0431c8f7240..3204b243653 100644 --- a/docs/jdbc-postgresql-catalog.md +++ b/docs/jdbc-postgresql-catalog.md @@ -13,7 +13,7 @@ import TabItem from '@theme/TabItem'; ## Introduction -Gravitino provides the ability to manage PostgreSQL metadata. +Apache Gravitino provides the ability to manage PostgreSQL metadata. :::caution Gravitino saves some system information in schema and table comments, like `(From Gravitino, DO NOT EDIT: gravitino.v1.uid1078334182909406185)`. Please don't change or remove this message. diff --git a/docs/lakehouse-iceberg-catalog.md b/docs/lakehouse-iceberg-catalog.md index 4fc28fd1106..15e463c9693 100644 --- a/docs/lakehouse-iceberg-catalog.md +++ b/docs/lakehouse-iceberg-catalog.md @@ -13,7 +13,7 @@ import TabItem from '@theme/TabItem'; ## Introduction -Gravitino provides the ability to manage Apache Iceberg metadata. +Apache Gravitino provides the ability to manage Apache Iceberg metadata. ### Requirements and limitations diff --git a/docs/manage-fileset-metadata-using-gravitino.md b/docs/manage-fileset-metadata-using-gravitino.md index 46dabbaa3c1..c94edf40e6d 100644 --- a/docs/manage-fileset-metadata-using-gravitino.md +++ b/docs/manage-fileset-metadata-using-gravitino.md @@ -9,7 +9,7 @@ license: This software is licensed under the Apache License version 2. import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -This page introduces how to manage fileset metadata in Gravitino. Filesets +This page introduces how to manage fileset metadata in Apache Gravitino. Filesets are a collection of files and directories. Users can leverage filesets to manage non-tabular data like training datasets and other raw data. diff --git a/docs/manage-messaging-metadata-using-gravitino.md b/docs/manage-messaging-metadata-using-gravitino.md index aba5a1017df..d84b6510321 100644 --- a/docs/manage-messaging-metadata-using-gravitino.md +++ b/docs/manage-messaging-metadata-using-gravitino.md @@ -1,5 +1,5 @@ --- -title: "Manage massaging metadata using Gravitino" +title: "Manage messaging metadata using Apache Gravitino" slug: /manage-massaging-metadata-using-gravitino date: 2024-4-22 keyword: Gravitino messaging metadata manage @@ -9,7 +9,7 @@ license: This software is licensed under the Apache License version 2. import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -This page introduces how to manage messaging metadata using Gravitino. Messaging metadata refers to +This page introduces how to manage messaging metadata using Apache Gravitino. Messaging metadata refers to the topic metadata of messaging systems such as Apache Kafka, Apache Pulsar, Apache RocketMQ, etc. Through Gravitino, you can create, update, delete, and list topics via unified RESTful APIs or the Java client.
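As a hedged sketch of the unified topic APIs just mentioned — the metalake, catalog, and schema names are placeholders, and the Kafka catalog's `default` schema name is an assumption:

```shell
# Sketch: list topics in a Kafka catalog over REST (names are illustrative)
curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \
  http://localhost:8090/api/metalakes/metalake_demo/catalogs/kafka_catalog/schemas/default/topics
```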
diff --git a/docs/manage-metalake-using-gravitino.md b/docs/manage-metalake-using-gravitino.md index b975cece682..ff4e76a029a 100644 --- a/docs/manage-metalake-using-gravitino.md +++ b/docs/manage-metalake-using-gravitino.md @@ -1,5 +1,5 @@ --- -title: "Manage metalake using Gravitino" +title: "Manage metalake using Apache Gravitino" slug: /manage-metalake-using-gravitino date: 2023-12-10 keyword: Gravitino metalake manage @@ -9,7 +9,7 @@ license: This software is licensed under the Apache License version 2. import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -This page introduces how to manage metalake by Gravitino. Metalake is a tenant-like concept in +This page introduces how to manage metalakes using Apache Gravitino. A metalake is a tenant-like concept in Gravitino; all the catalogs, users, and roles are under a metalake. Typically, a metalake maps to an organization or a company. diff --git a/docs/manage-relational-metadata-using-gravitino.md b/docs/manage-relational-metadata-using-gravitino.md index d38f8022aa3..47003de481e 100644 --- a/docs/manage-relational-metadata-using-gravitino.md +++ b/docs/manage-relational-metadata-using-gravitino.md @@ -1,5 +1,5 @@ --- -title: "Manage relational metadata using Gravitino" +title: "Manage relational metadata using Apache Gravitino" slug: /manage-relational-metadata-using-gravitino date: 2023-12-10 keyword: Gravitino relational metadata manage @@ -9,7 +9,7 @@ license: This software is licensed under the Apache License version 2. import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -This page introduces how to manage relational metadata by Gravitino, relational metadata refers +This page introduces how to manage relational metadata using Apache Gravitino. Relational metadata refers to relational catalogs, schemas, tables, and partitions. Through Gravitino, you can create, edit, and delete relational metadata via unified REST APIs or the Java client. @@ -692,7 +692,7 @@ In order to create a table, you need to provide the following information: - Table column auto-increment (optional) - Table property (optional) -#### Gravitino table column type +#### Apache Gravitino table column type Gravitino supports the following types: diff --git a/docs/manage-table-partition-using-gravitino.md b/docs/manage-table-partition-using-gravitino.md index 23ab9c6a39c..7dac4f9a498 100644 --- a/docs/manage-table-partition-using-gravitino.md +++ b/docs/manage-table-partition-using-gravitino.md @@ -1,5 +1,5 @@ --- -title: "Manage table partition using Gravitino" +title: "Manage table partition using Apache Gravitino" slug: /manage-table-partition-using-gravitino date: 2024-02-03 keyword: table partition management @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; ## Introduction -Although many catalogs inherently manage partitions automatically, there are scenarios where manual partition management is necessary. Usage scenarios like managing the TTL (Time-To-Live) of partition data, gathering statistics on partition metadata, and optimizing queries through partition pruning. For these reasons, Gravitino provides capabilities of partition management. +Although many catalogs inherently manage partitions automatically, there are scenarios where manual partition management is necessary. Usage scenarios include managing the TTL (Time-To-Live) of partition data, gathering statistics on partition metadata, and optimizing queries through partition pruning. For these reasons, Apache Gravitino provides capabilities of partition management.
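A hedged sketch of the partition-management APIs the introduction above refers to; the path segments are placeholders, and the endpoint shape is assumed from the REST conventions used elsewhere in these docs:

```shell
# Sketch: list the partitions of a table over REST (names are illustrative)
curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \
  http://localhost:8090/api/metalakes/metalake_demo/catalogs/hive_catalog/schemas/sales/tables/orders/partitions
```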
### Requirements and limitations diff --git a/docs/metrics.md b/docs/metrics.md index e59e6d72011..0127f859a49 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -1,5 +1,5 @@ --- -title: Gravitino metrics +title: Apache Gravitino metrics slug: /metrics keywords: - metrics @@ -8,7 +8,7 @@ license: "This software is licensed under the Apache License version 2." ## Introduction -Gravitino Metrics builds upon the [Dropwizard Metrics](https://metrics.dropwizard.io/). It exports these metrics through both JMX and an HTTP server, supporting JSON and Prometheus formats. You can retrieve them via HTTP requests, as illustrated below: +Apache Gravitino Metrics builds upon the [Dropwizard Metrics](https://metrics.dropwizard.io/). It exports these metrics through both JMX and an HTTP server, supporting JSON and Prometheus formats. You can retrieve them via HTTP requests, as illustrated below: ```shell // Use Gravitino Server address or Iceberg REST server address to replace 127.0.0.1:8090 diff --git a/docs/overview.md b/docs/overview.md index 2ab2aecfdd4..2b215412ede 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -6,7 +6,7 @@ license: "This software is licensed under the Apache License version 2." ## Introduction -Gravitino is a high-performance, geo-distributed, and federated metadata lake. It manages the +Apache Gravitino is a high-performance, geo-distributed, and federated metadata lake. It manages the metadata directly in different sources, types, and regions. It also provides users with unified metadata access for data and AI assets. ![Gravitino Architecture](assets/gravitino-architecture.png) @@ -70,7 +70,7 @@ assets like models, features, and others are under development. ## Terminology -### The model of Gravitino +### The model of Apache Gravitino ![Gravitino Model](assets/metadata-model.png) diff --git a/docs/publish-docker-images.md b/docs/publish-docker-images.md index b038e561439..a1923f92baa 100644 --- a/docs/publish-docker-images.md +++ b/docs/publish-docker-images.md @@ -8,7 +8,7 @@ license: "This software is licensed under the Apache License version 2." ## Introduction -The Gravitino project provides a set of Docker images to facilitate the publishing, development, and testing of the Gravitino project. +The Apache Gravitino project provides a set of Docker images to facilitate the publishing, development, and testing of the Gravitino project. [Datastrato Docker Hub](https://hub.docker.com/u/datastrato) repository publishes the official Gravitino Docker images. ## Publish Docker images to Docker Hub @@ -30,6 +30,6 @@ You can use GitHub actions to publish Docker images to the Docker Hub repository ![Publish Docker image](assets/publish-docker-image.png) -## More details of Gravitino Docker images +## More details of Apache Gravitino Docker images + [Gravitino Docker images](docker-image-details.md) diff --git a/docs/security.md b/docs/security.md index 2e0aa083ae2..af3bfd894c3 100644 --- a/docs/security.md +++ b/docs/security.md @@ -7,7 +7,7 @@ license: "This software is licensed under the Apache License version 2." ## Authentication -Gravitino supports two kinds of authentication mechanisms: simple and OAuth. +Apache Gravitino supports two kinds of authentication mechanisms: simple and OAuth. ### Simple mode @@ -183,7 +183,7 @@ If users choose to enable HTTPS, Gravitino won't provide the ability of HTTP ser Both the Gravitino server and Iceberg REST service can configure HTTPS. 
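Before the server-side configuration table that follows, a minimal `gravitino.conf` sketch of an HTTPS setup; only the trust-store keys appear verbatim in the table below, so the remaining key names and all values are assumptions:

```shell
# conf/gravitino.conf — illustrative HTTPS entries (key names partly assumed)
gravitino.server.webserver.enableHttps = true
gravitino.server.webserver.httpsPort = 8433
gravitino.server.webserver.keyStorePath = /path/to/keystore.jks
gravitino.server.webserver.keyStorePassword = <keystore-password>
gravitino.server.webserver.trustStorePath = /path/to/truststore.jks
gravitino.server.webserver.trustStorePassword = <truststore-password>
gravitino.server.webserver.trustStoreType = JKS
```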
-### Gravitino server's configuration +### Apache Gravitino server's configuration | Configuration item | Description | Default value | Required | Since version | |-----------------------------------------------------|--------------------------------------------------------------------|---------------|---------------------------------------------------|---------------| @@ -200,7 +200,7 @@ Both the Gravitino server and Iceberg REST service can configure HTTPS. | `gravitino.server.webserver.trustStorePassword` | Password to the trust store. | (none) | Yes if use HTTPS and the authentication of client | 0.3.0 | | `gravitino.server.webserver.trustStoreType` | The type to the trust store. | `JKS` | No | 0.3.0 | -### Iceberg REST service's configuration +### Apache Iceberg REST service's configuration | Configuration item | Description | Default value | Required | Since version | |------------------------------------------------------------|--------------------------------------------------------------------|---------------|---------------------------------------------------|---------------| @@ -309,7 +309,7 @@ curl -v -X GET --cacert ./certificate.pem -H "Accept: application/vnd.gravitino. | `gravitino.server.webserver.exposedHeaders` | A comma separated list of allowed HTTP headers exposed on the client. The default value is the empty list. | `` | No | 0.4.0 | | `gravitino.server.webserver.chainPreflight` | If true chained preflight requests for normal handling (as an OPTION request). Otherwise, the filter responds to the preflight. The default is true. | `true` | No | 0.4.0 | -### Iceberg REST service's configuration +### Apache Iceberg REST service's configuration | Configuration item | Description | Default value | Required | Since version | |-----------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------|----------|---------------| diff --git a/docs/spark-connector/spark-catalog-hive.md b/docs/spark-connector/spark-catalog-hive.md index 815a1c2d02f..9214e9746e0 100644 --- a/docs/spark-connector/spark-catalog-hive.md +++ b/docs/spark-connector/spark-catalog-hive.md @@ -5,7 +5,7 @@ keyword: spark connector hive catalog license: "This software is licensed under the Apache License version 2." --- -With the Gravitino Spark connector, accessing data or managing metadata in Hive catalogs becomes straightforward, enabling seamless federation queries across different Hive catalogs. +With the Apache Gravitino Spark connector, accessing data or managing metadata in Hive catalogs becomes straightforward, enabling seamless federation queries across different Hive catalogs. ## Capabilities diff --git a/docs/spark-connector/spark-catalog-iceberg.md b/docs/spark-connector/spark-catalog-iceberg.md index 37c308627fe..f6ced796d6d 100644 --- a/docs/spark-connector/spark-catalog-iceberg.md +++ b/docs/spark-connector/spark-catalog-iceberg.md @@ -5,7 +5,7 @@ keyword: spark connector iceberg catalog license: "This software is licensed under the Apache License version 2." --- -The Gravitino Spark connector offers the capability to read and write Iceberg tables, with the metadata managed by the Gravitino server. 
To enable the use of the Iceberg catalog within the Spark connector, you must set the configuration `spark.sql.gravitino.enableIcebergSupport` to `true` and download Iceberg Spark runtime jar to Spark classpath. +The Apache Gravitino Spark connector offers the capability to read and write Iceberg tables, with the metadata managed by the Gravitino server. To enable the use of the Iceberg catalog within the Spark connector, you must set the configuration `spark.sql.gravitino.enableIcebergSupport` to `true` and download Iceberg Spark runtime jar to Spark classpath. ## Capabilities @@ -95,7 +95,7 @@ DESC EXTENDED employee; For more details about `CALL`, please refer to the [Spark Procedures description](https://iceberg.apache.org/docs/latest/spark-procedures/#spark-procedures) in Iceberg official document. -## Iceberg backend-catalog support +## Apache Iceberg backend-catalog support - HiveCatalog - JdbcCatalog - RESTCatalog diff --git a/docs/spark-connector/spark-connector.md b/docs/spark-connector/spark-connector.md index 54f28d4d499..84cbd91c096 100644 --- a/docs/spark-connector/spark-connector.md +++ b/docs/spark-connector/spark-connector.md @@ -1,5 +1,5 @@ --- -title: "Gravitino Spark connector" +title: "Apache Gravitino Spark connector" slug: /spark-connector/spark-connector keyword: spark connector federation query license: "This software is licensed under the Apache License version 2." @@ -7,7 +7,7 @@ license: "This software is licensed under the Apache License version 2." ## Overview -The Gravitino Spark connector leverages the Spark DataSourceV2 interface to facilitate the management of diverse catalogs under Gravitino. This capability allows users to perform federation queries, accessing data from various catalogs through a unified interface and consistent access control. +The Apache Gravitino Spark connector leverages the Spark DataSourceV2 interface to facilitate the management of diverse catalogs under Gravitino. This capability allows users to perform federation queries, accessing data from various catalogs through a unified interface and consistent access control. ## Capabilities diff --git a/docs/trino-connector/catalog-hive.md b/docs/trino-connector/catalog-hive.md index 4b33af6a22c..4d5cddba1d7 100644 --- a/docs/trino-connector/catalog-hive.md +++ b/docs/trino-connector/catalog-hive.md @@ -1,5 +1,5 @@ --- -title: "Gravitino connector - Hive catalog" +title: "Apache Gravitino connector - Hive catalog" slug: /trino-connector/catalog-hive keyword: gravitino connector trino license: "This software is licensed under the Apache License version 2." @@ -40,7 +40,7 @@ per catalog: ### Create a schema -Users can create a schema with properties through Gravitino Trino connector as follows: +Users can create a schema with properties through Apache Gravitino Trino connector as follows: ```SQL CREATE SCHEMA catalog.schema_name diff --git a/docs/trino-connector/catalog-iceberg.md b/docs/trino-connector/catalog-iceberg.md index cc923c7bd2f..d81767e6d5c 100644 --- a/docs/trino-connector/catalog-iceberg.md +++ b/docs/trino-connector/catalog-iceberg.md @@ -1,5 +1,5 @@ --- -title: "Gravitino connector - Iceberg catalog" +title: "Apache Gravitino connector - Iceberg catalog" slug: /trino-connector/catalog-iceberg keyword: gravitino connector trino license: "This software is licensed under the Apache License version 2." 
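Tying together the Spark connector hunks above, a minimal sketch of launching `spark-sql` with the `spark.sql.gravitino.enableIcebergSupport` flag the docs mention; the jar paths, the Iceberg runtime version, and the `spark.sql.gravitino.uri`/`spark.sql.gravitino.metalake` keys are assumptions:

```shell
# Sketch: spark-sql with Gravitino's Iceberg support enabled (values illustrative;
# any additional connector registration settings are omitted here)
spark-sql \
  --jars /path/to/iceberg-spark-runtime-3.4_2.12-1.3.1.jar,/path/to/gravitino-spark-connector.jar \
  --conf spark.sql.gravitino.uri=http://localhost:8090 \
  --conf spark.sql.gravitino.metalake=test \
  --conf spark.sql.gravitino.enableIcebergSupport=true
```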
@@ -22,7 +22,7 @@ To use Iceberg, you need: ### Create a schema -Users can create a schema through Gravitino Trino connector as follows: +Users can create a schema through Apache Gravitino Trino connector as follows: ```SQL CREATE SCHEMA "metalake.catalog".schema_name diff --git a/docs/trino-connector/catalog-mysql.md b/docs/trino-connector/catalog-mysql.md index e3f75a4b96b..035a66860e9 100644 --- a/docs/trino-connector/catalog-mysql.md +++ b/docs/trino-connector/catalog-mysql.md @@ -1,5 +1,5 @@ --- -title: "Gravitino connector - MySQL catalog" +title: "Apache Gravitino connector - MySQL catalog" slug: /trino-connector/catalog-mysql keyword: gravitino connector trino license: "This software is licensed under the Apache License version 2." @@ -16,7 +16,7 @@ To connect to MySQL, you need: ## Create table -At present, the Gravitino connector only supports basic MySQL table creation statements, which involve fields, null allowances, and comments. +At present, the Apache Gravitino connector only supports basic MySQL table creation statements, which involve fields, null allowances, and comments. However, it does not support advanced features like primary keys, indexes, default values, and auto-increment. The Gravitino connector does not support `CREATE TABLE AS SELECT`. diff --git a/docs/trino-connector/catalog-postgresql.md b/docs/trino-connector/catalog-postgresql.md index 87bbda42485..6ae6d7fe346 100644 --- a/docs/trino-connector/catalog-postgresql.md +++ b/docs/trino-connector/catalog-postgresql.md @@ -1,5 +1,5 @@ --- -title: "Gravitino connector - PostgreSQL catalog" +title: "Apache Gravitino connector - PostgreSQL catalog" slug: /trino-connector/catalog-postgresql keyword: gravitino connector trino license: "This software is licensed under the Apache License version 2." @@ -16,7 +16,7 @@ To connect to PostgreSQL, you need: ## Create table -At present, the Gravitino connector only supports basic PostgreSQL table creation statements, which involve fields, null allowances, and comments. +At present, the Apache Gravitino connector only supports basic PostgreSQL table creation statements, which involve fields, null allowances, and comments. However, it does not support advanced features like primary keys, indexes, default values, and auto-increment. The Gravitino connector does not support `CREATE TABLE AS SELECT`. diff --git a/docs/trino-connector/configuration.md b/docs/trino-connector/configuration.md index 62fd59a11ba..8e066caac5e 100644 --- a/docs/trino-connector/configuration.md +++ b/docs/trino-connector/configuration.md @@ -1,5 +1,5 @@ --- -title: "Gravitino connector Configuration" +title: "Apache Gravitino connector Configuration" slug: /trino-connector/configuration keyword: gravitino connector trino license: "This software is licensed under the Apache License version 2." diff --git a/docs/trino-connector/development.md b/docs/trino-connector/development.md index 627118afa0d..e5c9e7a162d 100644 --- a/docs/trino-connector/development.md +++ b/docs/trino-connector/development.md @@ -1,11 +1,11 @@ --- -title: "Gravitino connector development" +title: "Apache Gravitino connector development" slug: /trino-connector/development keyword: gravitino connector development license: "This software is licensed under the Apache License version 2." --- -This document is to guide users through the development of the Gravitino connector for Trino locally. +This document is to guide users through the development of the Apache Gravitino connector for Trino locally. 
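For the connector development guide introduced above, a hedged sketch of a local build; the Gradle module path is an assumption based on the repository layout, not a documented command:

```shell
# Build the Trino connector from the Gravitino source tree (module path assumed)
git clone https://github.com/datastrato/gravitino.git
cd gravitino
./gradlew :trino-connector:build
```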
## Prerequisites diff --git a/docs/trino-connector/index.md b/docs/trino-connector/index.md index 47a52af77dd..8e44833ca36 100644 --- a/docs/trino-connector/index.md +++ b/docs/trino-connector/index.md @@ -1,11 +1,11 @@ --- -title: "Gravitino connector index" +title: "Apache Gravitino connector index" slug: /trino-connector/index keyword: gravitino connector trino license: "This software is licensed under the Apache License version 2." --- -Gravitino connector index: +Apache Gravitino connector index: - [Trino Support](trino-connector.md) - [Requirements](requirements.md) diff --git a/docs/trino-connector/installation.md b/docs/trino-connector/installation.md index e74fe9848ed..a946f12b5fe 100644 --- a/docs/trino-connector/installation.md +++ b/docs/trino-connector/installation.md @@ -1,11 +1,11 @@ --- -title: "Gravitino connector installation" +title: "Apache Gravitino connector installation" slug: /trino-connector/installation keyword: gravitino connector trino license: "This software is licensed under the Apache License version 2." --- -To install the Gravitino connector, you should first deploy the Trino environment, and then install the Gravitino connector plugin into Trino. +To install the Apache Gravitino connector, you should first deploy the Trino environment, and then install the Gravitino connector plugin into Trino. Please refer to the [Deploying Trino documentation](https://trino.io/docs/current/installation/deployment.html) and do the following steps: 1. [Download](https://github.com/datastrato/gravitino/releases) the Gravitino connector tarball and unpack it. @@ -46,7 +46,7 @@ docker run --name trino-gravitino -d -p 8080:8080 trinodb/trino:435 Run `docker ps` to check whether the container is running. -### Installing the Gravitino connector +### Installing the Apache Gravitino connector Download the Gravitino connector tarball and unpack it. @@ -86,7 +86,7 @@ discovery.uri=http://localhost:8080 catalog.management=dynamic ``` -### Configuring the Gravitino connector +### Configuring the Apache Gravitino connector Assuming you have now started the Gravitino server on the host `gravitino-server-host` and already created a metalake named `test`, if those have not been prepared, please refer to the [Gravitino getting started](../getting-started.md). @@ -104,7 +104,7 @@ gravitino.simplify-catalog-names=true - The `gravitino.uri` defines the connection information about Gravitino server. Make sure your container can access the Gravitino server. - The `gravitino.simplify-catalog-names` setting omits the metalake prefix from catalog names when set to true. -Full configurations for Gravitino connector can be seen [here](configuration.md) +Full configurations for Apache Gravitino connector can be seen [here](configuration.md) If you haven't created the metalake named `test`, you can use the following command to create it. @@ -118,7 +118,7 @@ And then restart the Trino container to load the Gravitino connector. docker restart trino-gravitino ``` -### Verifying the Gravitino connector +### Verifying the Apache Gravitino connector Use the Trino CLI to connect to the Trino container and run a query. 
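For readers trying this out, the CLI check above can also be done programmatically. Below is a minimal sketch using the Trino JDBC driver (the `io.trino:trino-jdbc` artifact); the host, port, and the `admin` user name are assumptions carried over from the `docker run` command above, not values required by the connector.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class VerifyGravitinoCatalogs {
  public static void main(String[] args) throws Exception {
    // Trino requires a user name even when authentication is disabled;
    // "admin" here is arbitrary.
    try (Connection conn =
            DriverManager.getConnection("jdbc:trino://localhost:8080", "admin", null);
        Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery("SHOW CATALOGS")) {
      while (rs.next()) {
        // Catalogs loaded from the Gravitino metalake should appear in this list.
        System.out.println(rs.getString(1));
      }
    }
  }
}
```

If the metalake's catalogs show up alongside Trino's built-in `system` catalog, the connector is loaded.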
diff --git a/docs/trino-connector/requirements.md b/docs/trino-connector/requirements.md
index b5687e869ac..becc09c3358 100644
--- a/docs/trino-connector/requirements.md
+++ b/docs/trino-connector/requirements.md
@@ -1,11 +1,11 @@
 ---
-title: "Gravitino connector requirements"
+title: "Apache Gravitino connector requirements"
 slug: /trino-connector/requirements
 keyword: gravitino connector trino
 license: "This software is licensed under the Apache License version 2."
 ---
 
-To install and deploy the Gravitino connector, The following environmental setup is necessary:
+To install and deploy the Apache Gravitino connector, the following environmental setup is necessary:
 
 - Trino server version should be at least Trino-server-435. Other versions of Trino have not undergone thorough testing.
diff --git a/docs/trino-connector/sql-support.md b/docs/trino-connector/sql-support.md
index 8db04f3bfcf..934e989a0c1 100644
--- a/docs/trino-connector/sql-support.md
+++ b/docs/trino-connector/sql-support.md
@@ -1,11 +1,11 @@
 ---
-title: "Gravitino connector SQL support"
+title: "Apache Gravitino connector SQL support"
 slug: /trino-connector/sql-support
 keyword: gravitino connector trino
 license: "This software is licensed under the Apache License version 2."
 ---
 
-The connector provides read access and write access to data and metadata stored in Gravitino.
+The connector provides read access and write access to data and metadata stored in Apache Gravitino.
 
 ### Globally available statements
 
diff --git a/docs/trino-connector/supported-catalog.md b/docs/trino-connector/supported-catalog.md
index 6db7d85008f..54d0bedd30f 100644
--- a/docs/trino-connector/supported-catalog.md
+++ b/docs/trino-connector/supported-catalog.md
@@ -1,11 +1,11 @@
 ---
-title: "Gravitino supported Catalogs"
+title: "Apache Gravitino supported Catalogs"
 slug: /trino-connector/supported-catalog
 keyword: gravitino connector trino
 license: "This software is licensed under the Apache License version 2."
 ---
 
-The catalogs currently supported by the Gravitino connector are as follows:
+The catalogs currently supported by the Apache Gravitino connector are as follows:
 
 - [Hive](catalog-hive.md)
 - [Iceberg](catalog-iceberg.md)
@@ -145,7 +145,7 @@ More trino connector configurations can refer to:
 - [MySQL catalog](https://trino.io/docs/current/connector/mysql.html#general-configuration-properties)
 - [PostgreSQL catalog](https://trino.io/docs/current/connector/postgresql.html#general-configuration-properties)
 
-## Data type mapping between Trino and Gravitino
+## Data type mapping between Trino and Apache Gravitino
 
 Gravitino connector supports the following data type conversions between Trino and Gravitino currently. Depending on the detailed catalog, Gravitino may not support some data types conversion for this specific catalog, for example, Hive does not support `TIME` data type.
diff --git a/docs/trino-connector/trino-connector.md b/docs/trino-connector/trino-connector.md
index 8a8006a91aa..bb4e252a43e 100644
--- a/docs/trino-connector/trino-connector.md
+++ b/docs/trino-connector/trino-connector.md
@@ -1,11 +1,11 @@
 ---
-title: "Gravitino connector"
+title: "Apache Gravitino connector"
 slug: /trino-connector/trino-connector
 keyword: gravitino connector trino
 license: "This software is licensed under the Apache License version 2."
 ---
 
-Trino can manage and access data using the Trino connector provided by `Gravitino`, commonly referred to as the `Gravitino connector`.
+Trino can manage and access data using the Trino connector provided by `Apache Gravitino`, commonly referred to as the `Gravitino connector`.
 After configuring the Gravitino connector in Trino, Trino can automatically load catalog metadata from Gravitino, allowing users to directly access these catalogs in Trino.
 Once integrated with Gravitino, Trino can operate on all Gravitino data without requiring additional configuration.
 
 The Gravitino connector uses the [Trino dynamic catalog managed mechanism](https://trino.io/docs/current/admin/properties-catalog.html) to load catalogs.
diff --git a/docs/webui.md b/docs/webui.md
index 662cb4b6412..5ca2b754630 100644
--- a/docs/webui.md
+++ b/docs/webui.md
@@ -1,5 +1,5 @@
 ---
-title: 'Gravitino web UI'
+title: 'Apache Gravitino web UI'
 slug: /webui
 keyword: webui
 last_update:
@@ -8,7 +8,7 @@ last_update:
   license: 'This software is licensed under the Apache License version 2.'
 ---
 
-This document primarily outlines how users can manage metadata within Gravitino using the web UI, the graphical interface is accessible through a web browser as an alternative to writing code or using the REST interface.
+This document primarily outlines how users can manage metadata within Apache Gravitino using the web UI; the graphical interface is accessible through a web browser as an alternative to writing code or using the REST interface.
 
 Currently, you can integrate [OAuth settings](./security.md) to view, add, modify, and delete metalakes, create catalogs, and view catalogs, schemas, and tables, among other functions.
diff --git a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/PropertiesConverter.java b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/PropertiesConverter.java
index 8a66f15a489..6b03a18b6d2 100644
--- a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/PropertiesConverter.java
+++ b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/PropertiesConverter.java
@@ -23,7 +23,7 @@
 import org.apache.flink.configuration.Configuration;
 
 /**
- * PropertiesConverter is used to convert properties between Flink properties and Gravitino
+ * PropertiesConverter is used to convert properties between Flink properties and Apache Gravitino
  * properties
  */
 public interface PropertiesConverter {
diff --git a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/catalog/GravitinoCatalogManager.java b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/catalog/GravitinoCatalogManager.java
index 95eca4a57b9..dcf2db97d44 100644
--- a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/catalog/GravitinoCatalogManager.java
+++ b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/catalog/GravitinoCatalogManager.java
@@ -29,7 +29,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/** GravitinoCatalogManager is used to retrieve catalogs from Gravitino server. */
+/** GravitinoCatalogManager is used to retrieve catalogs from Apache Gravitino server. */
 public class GravitinoCatalogManager {
   private static final Logger LOG = LoggerFactory.getLogger(GravitinoCatalogManager.class);
   private static GravitinoCatalogManager gravitinoCatalogManager;
diff --git a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/store/GravitinoCatalogStore.java b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/store/GravitinoCatalogStore.java
index f5eb4bdf6d5..f896c97a1aa 100644
--- a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/store/GravitinoCatalogStore.java
+++ b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/store/GravitinoCatalogStore.java
@@ -36,7 +36,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/** GravitinoCatalogStore is used to store catalog information to Gravitino server. */
+/** GravitinoCatalogStore is used to store catalog information to Apache Gravitino server. */
 public class GravitinoCatalogStore extends AbstractCatalogStore {
   private static final Logger LOG = LoggerFactory.getLogger(GravitinoCatalogStore.class);
   private final GravitinoCatalogManager gravitinoCatalogManager;
diff --git a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java
index dd8419e5182..b85c0d3f673 100644
--- a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java
+++ b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java
@@ -252,7 +252,7 @@ public void testGetCatalogFromGravitino() {
     Assertions.assertEquals(
         numCatalogs + 1, tableEnv.listCatalogs().length, "Should create a new catalog");
 
-    // get the catalog from gravitino.
+    // get the catalog from Gravitino.
     Optional<Catalog> flinkHiveCatalog = tableEnv.getCatalog(catalogName);
     Assertions.assertTrue(flinkHiveCatalog.isPresent());
     Assertions.assertInstanceOf(GravitinoHiveCatalog.class, flinkHiveCatalog.get());
diff --git a/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/MiniGravitino.java b/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/MiniGravitino.java
index b51afb7bd3f..d9205a70f37 100644
--- a/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/MiniGravitino.java
+++ b/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/MiniGravitino.java
@@ -53,8 +53,8 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * MiniGravitino is a mini Gravitino server for integration tests. It starts a Gravitino server in
- * the same JVM process.
+ * MiniGravitino is a mini Apache Gravitino server for integration tests. It starts a Gravitino
+ * server in the same JVM process.
  */
 public class MiniGravitino {
   private static final Logger LOG = LoggerFactory.getLogger(MiniGravitino.class);
@@ -76,7 +76,7 @@ public MiniGravitino(MiniGravitinoContext context) throws IOException {
   }
 
   private void removeIcebergRestConfiguration(Properties properties) {
-    // Disable iceberg rest service
+    // Disable Iceberg REST service
    properties.remove(
         AuxiliaryServiceManager.GRAVITINO_AUX_SERVICE_PREFIX
             + AuxiliaryServiceManager.AUX_SERVICE_NAMES);
diff --git a/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/container/HiveContainer.java b/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/container/HiveContainer.java
index fc738010185..ff8f7de86df 100644
--- a/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/container/HiveContainer.java
+++ b/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/container/HiveContainer.java
@@ -89,7 +89,7 @@ public void close() {
   private void copyHiveLog() {
     try {
       String destPath = System.getenv("IT_PROJECT_DIR");
-      LOG.info("Copy hive log file to {}", destPath);
+      LOG.info("Copy Hive log file to {}", destPath);
 
       String hiveLogJarPath = "/hive.tar";
       String HdfsLogJarPath = "/hdfs.tar";
@@ -101,7 +101,7 @@ private void copyHiveLog() {
       container.copyFileFromContainer(hiveLogJarPath, destPath + File.separator + "hive.tar");
       container.copyFileFromContainer(HdfsLogJarPath, destPath + File.separator + "hdfs.tar");
     } catch (Exception e) {
-      LOG.warn("Can't copy hive log for:", e);
+      LOG.warn("Can't copy Hive log for:", e);
     }
   }
diff --git a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/trino/TrinoConnectorIT.java b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/trino/TrinoConnectorIT.java
index a862855eafa..a1eebb4fe67 100644
--- a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/trino/TrinoConnectorIT.java
+++ b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/trino/TrinoConnectorIT.java
@@ -94,7 +94,7 @@ public static void startDockerContainer() throws IOException, TException, Interr
     containerSuite.startHiveContainer();
 
-    // Initial hive client
+    // Initial Hive client
     HiveConf hiveConf = new HiveConf();
     String hiveMetastoreUris =
         String.format(
@@ -103,7 +103,7 @@ public static void startDockerContainer() throws IOException, TException, Interr
             HiveContainer.HIVE_METASTORE_PORT);
     hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, hiveMetastoreUris);
 
-    // Currently must first create metalake and catalog then start trino container
+    // Currently must first create metalake and catalog then start Trino container
     createMetalake();
     createCatalog();
 
@@ -835,7 +835,7 @@ void testHiveCatalogCreatedByGravitino() {
       Assertions.fail("Trino fail to load catalogs created by gravitino: " + sql);
     }
 
-    // Because we assign 'hive.target-max-file-size' a wrong value, trino can't load the catalog
+    // Because we assign 'hive.target-max-file-size' a wrong value, Trino can't load the catalog
     String data = containerSuite.getTrinoContainer().executeQuerySQL(sql).get(0).get(0);
     Assertions.assertEquals(catalogName, data);
   }
@@ -875,7 +875,7 @@ void testWrongHiveCatalogProperty() {
     String sql = String.format("show catalogs like '%s'", catalogName);
     await().atLeast(6, TimeUnit.SECONDS);
 
-    // Because we assign 'hive.target-max-file-size' a wrong value, trino can't load the catalog
+    // Because we assign 'hive.target-max-file-size' a wrong value, Trino can't load the catalog
     Assertions.assertTrue(containerSuite.getTrinoContainer().executeQuerySQL(sql).isEmpty());
   }
 
@@ -941,11 +941,11 @@ void testIcebergTableAndSchemaCreatedByGravitino() {
     boolean success = checkTrinoHasLoaded(sql, 30);
     if (!success) {
-      Assertions.fail("Trino fail to load table created by gravitino: " + sql);
+      Assertions.fail("Trino fail to load table created by Gravitino: " + sql);
     }
 
     String data = containerSuite.getTrinoContainer().executeQuerySQL(sql).get(0).get(0);
-    LOG.info("create iceberg hive table sql is: {}", data);
+    LOG.info("create Iceberg Hive table SQL is: {}", data);
     // Iceberg does not contain any properties;
     Assertions.assertFalse(data.contains("key1"));
     Assertions.assertTrue(data.contains("partitioning = ARRAY['BinaryType']"));
@@ -1041,7 +1041,7 @@ void testIcebergCatalogCreatedByGravitino() {
     Assertions.assertTrue(checkTrinoHasLoaded(sql, 30));
 
     final String sql1 = String.format("drop schema %s.%s cascade", catalogName, schemaName);
-    // Will fail because the iceberg catalog does not support cascade drop
+    // Will fail because the Iceberg catalog does not support cascade drop
     TrinoContainer trinoContainer = containerSuite.getTrinoContainer();
     Assertions.assertThrows(
         RuntimeException.class,
diff --git a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/trino/TrinoQueryITBase.java b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/trino/TrinoQueryITBase.java
index 8b079d5df7d..f859792859f 100644
--- a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/trino/TrinoQueryITBase.java
+++ b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/trino/TrinoQueryITBase.java
@@ -45,10 +45,10 @@ public class TrinoQueryITBase {
 
   private static final Logger LOG = LoggerFactory.getLogger(TrinoQueryITBase.class);
 
-  // Auto start docker containers and gravitino server
+  // Auto start docker containers and Gravitino server
   protected static boolean autoStart = true;
 
-  // Auto start gravitino server
+  // Auto start Gravitino server
   protected static boolean autoStartGravitino = true;
 
   protected static boolean started = false;
diff --git a/rfc/rfc-1/rfc-1.md b/rfc/rfc-1/rfc-1.md
index 9cbcf8ef756..f8bf8b40c8c 100644
--- a/rfc/rfc-1/rfc-1.md
+++ b/rfc/rfc-1/rfc-1.md
@@ -48,7 +48,7 @@
 
 ### Schema Entities
 
-The schema system in Gravitino is organized like below:
+The schema system in Apache Gravitino is organized like below:
 
 ![Schema System](schema-overview.png)
 
diff --git a/rfc/rfc-3/Transaction-implementation-on-kv.md b/rfc/rfc-3/Transaction-implementation-on-kv.md
index 24010776133..4d902700b38 100644
--- a/rfc/rfc-3/Transaction-implementation-on-kv.md
+++ b/rfc/rfc-3/Transaction-implementation-on-kv.md
@@ -23,11 +23,9 @@
 | :------- |-------|------------|
 | v0.1 | Qi Yu | 21/11/2023 |
 
-
-
 ## Background
-Currently, our storage layer heavily relies on the transaction mechanism provided by key-value storage backend such as RocksDB to ensure reliability. However, some key-value pair databases do not support transaction operations, making it challenging for Gravitino to adapt to other
-KV databases such as Redis, Cassandra, Hbase, and so on.
+
+Currently, our storage layer heavily relies on the transaction mechanism provided by key-value storage backend such as RocksDB to ensure reliability. However, some key-value pair databases do not support transaction operations, making it challenging for Apache Gravitino to adapt to other KV databases such as Redis, Cassandra, Hbase, and so on.
 
 To make gravitino adapt different key-value stores, we need to eliminate transactional dependency and come up with alternative solutions.
diff --git a/server/src/main/java/com/datastrato/gravitino/server/web/filter/AccessControlNotAllowedFilter.java b/server/src/main/java/com/datastrato/gravitino/server/web/filter/AccessControlNotAllowedFilter.java
index d19c1360269..2e7e8e042f8 100644
--- a/server/src/main/java/com/datastrato/gravitino/server/web/filter/AccessControlNotAllowedFilter.java
+++ b/server/src/main/java/com/datastrato/gravitino/server/web/filter/AccessControlNotAllowedFilter.java
@@ -31,7 +31,7 @@
 import javax.ws.rs.ext.Provider;
 
 /**
- * AccessControlNotAllowedFilter is used for filter the requests related to access control if
+ * AccessControlNotAllowedFilter is used to filter the requests related to access control if Apache
  * Gravitino doesn't enable authorization. The filter return 405 error code. You can refer to
  * https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/405. No methods will be returned in the
  * allow methods.
diff --git a/server/src/main/java/com/datastrato/gravitino/server/web/ui/WebUIFilter.java b/server/src/main/java/com/datastrato/gravitino/server/web/ui/WebUIFilter.java
index 2be29f7e8f8..f5d4731bda4 100644
--- a/server/src/main/java/com/datastrato/gravitino/server/web/ui/WebUIFilter.java
+++ b/server/src/main/java/com/datastrato/gravitino/server/web/ui/WebUIFilter.java
@@ -27,7 +27,7 @@
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
 
-// This filter is used to serve static HTML files from the Gravitino WEB UI.
+// This filter is used to serve static HTML files from the Apache Gravitino WEB UI.
 // https://nextjs.org/docs/pages/building-your-application/deploying/static-exports#deploying
 public class WebUIFilter implements Filter {
   @Override
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/PropertiesConverter.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/PropertiesConverter.java
index fcf363c81c4..e18b7d4827c 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/PropertiesConverter.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/PropertiesConverter.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 import org.apache.spark.sql.util.CaseInsensitiveStringMap;
 
-/** Interface for transforming properties between Gravitino and Spark. */
+/** Interface for transforming properties between Apache Gravitino and Apache Spark. */
 public interface PropertiesConverter {
   @VisibleForTesting String SPARK_PROPERTY_PREFIX = "spark.bypass.";
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/SparkTypeConverter.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/SparkTypeConverter.java
index 6d1aa4c5406..79a86540aa6 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/SparkTypeConverter.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/SparkTypeConverter.java
@@ -46,7 +46,7 @@
 import org.apache.spark.sql.types.TimestampType;
 import org.apache.spark.sql.types.VarcharType;
 
-/** Transform DataTypes between Gravitino and Spark. */
+/** Transform DataTypes between Apache Gravitino and Apache Spark. */
 public class SparkTypeConverter {
   public Type toGravitinoType(DataType sparkType) {
     if (sparkType instanceof ByteType) {
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/catalog/BaseCatalog.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/catalog/BaseCatalog.java
index 6b9cc852318..c4b717ba294 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/catalog/BaseCatalog.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/catalog/BaseCatalog.java
@@ -56,7 +56,7 @@
 import org.apache.spark.sql.util.CaseInsensitiveStringMap;
 
 /**
- * BaseCatalog acts as the foundational class for Spark CatalogManager registration, enabling
+ * BaseCatalog acts as the foundational class for Apache Spark CatalogManager registration, enabling
  * seamless integration of various data source catalogs within Spark's ecosystem. This class is
  * pivotal in bridging Spark with diverse data sources, ensuring a unified approach to data
  * management and manipulation across the platform.
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/catalog/GravitinoCatalogManager.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/catalog/GravitinoCatalogManager.java
index 10726853f9a..bce9baee83f 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/catalog/GravitinoCatalogManager.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/catalog/GravitinoCatalogManager.java
@@ -30,7 +30,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/** GravitinoCatalogManager is used to retrieve catalogs from Gravitino server. */
+/** GravitinoCatalogManager is used to retrieve catalogs from Apache Gravitino server. */
 public class GravitinoCatalogManager {
   private static final Logger LOG = LoggerFactory.getLogger(GravitinoCatalogManager.class);
   private static GravitinoCatalogManager gravitinoCatalogManager;
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/hive/HivePropertiesConverter.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/hive/HivePropertiesConverter.java
index b52bbfe1d81..b1187092072 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/hive/HivePropertiesConverter.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/hive/HivePropertiesConverter.java
@@ -32,7 +32,7 @@
 import org.apache.commons.lang3.StringUtils;
 import org.apache.spark.sql.connector.catalog.TableCatalog;
 
-/** Transform hive catalog properties between Spark and Gravitino. */
+/** Transform Apache Hive catalog properties between Apache Spark and Apache Gravitino. */
 public class HivePropertiesConverter implements PropertiesConverter {
   public static class HivePropertiesConverterHolder {
     private static final HivePropertiesConverter INSTANCE = new HivePropertiesConverter();
@@ -44,7 +44,7 @@ public static HivePropertiesConverter getInstance() {
     return HivePropertiesConverterHolder.INSTANCE;
   }
 
-  // Transform Spark hive file format to Gravitino hive file format
+  // Transform Spark Hive file format to Gravitino Hive file format
   static final Map fileFormatMap =
       ImmutableMap.of(
           "sequencefile", HivePropertiesConstants.GRAVITINO_HIVE_FORMAT_SEQUENCEFILE,
@@ -81,7 +81,7 @@ public Map toSparkCatalogProperties(Map properti
         StringUtils.isNotBlank(metastoreUri),
         "Couldn't get "
             + GravitinoSparkConfig.GRAVITINO_HIVE_METASTORE_URI
-            + " from hive catalog properties");
+            + " from Hive catalog properties");
     HashMap all = new HashMap<>();
     all.put(GravitinoSparkConfig.SPARK_HIVE_METASTORE_URI, metastoreUri);
     return all;
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/GravitinoIcebergCatalog.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/GravitinoIcebergCatalog.java
index 08ca7c60925..a98761c35a4 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/GravitinoIcebergCatalog.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/GravitinoIcebergCatalog.java
@@ -45,9 +45,9 @@
 import org.apache.spark.sql.util.CaseInsensitiveStringMap;
 
 /**
- * The GravitinoIcebergCatalog class extends the BaseCatalog to integrate with the Iceberg table
- * format, providing specialized support for Iceberg-specific functionalities within Spark's
- * ecosystem. This implementation can further adapt to specific interfaces such as
+ * The GravitinoIcebergCatalog class extends the BaseCatalog to integrate with the Apache Iceberg
+ * table format, providing specialized support for Iceberg-specific functionalities within Apache
+ * Spark's ecosystem. This implementation can further adapt to specific interfaces such as
  * StagingTableCatalog and FunctionCatalog, allowing for advanced operations like table staging and
  * function management tailored to the needs of Iceberg tables.
  */
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/IcebergPropertiesConverter.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/IcebergPropertiesConverter.java
index 88077a64157..48e3cb5166d 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/IcebergPropertiesConverter.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/IcebergPropertiesConverter.java
@@ -25,7 +25,7 @@
 import java.util.Map;
 import org.apache.commons.lang3.StringUtils;
 
-/** Transform Iceberg catalog properties between Spark and Gravitino. */
+/** Transform Apache Iceberg catalog properties between Apache Spark and Apache Gravitino. */
 public class IcebergPropertiesConverter implements PropertiesConverter {
   public static class IcebergPropertiesConverterHolder {
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/SparkIcebergTable.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/SparkIcebergTable.java
index 819de065bf7..9f605f92eb2 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/SparkIcebergTable.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/iceberg/SparkIcebergTable.java
@@ -34,7 +34,7 @@
 import org.apache.spark.sql.util.CaseInsensitiveStringMap;
 
 /**
- * For spark-connector in Iceberg, it explicitly uses SparkTable to identify whether it is an
+ * For spark-connector in Iceberg, it explicitly uses SparkTable to identify whether it is an Apache
  * Iceberg table, so the SparkIcebergTable must extend SparkTable.
  */
 public class SparkIcebergTable extends SparkTable {
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/plugin/GravitinoDriverPlugin.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/plugin/GravitinoDriverPlugin.java
index 99a0982938a..a60c61d09b5 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/plugin/GravitinoDriverPlugin.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/plugin/GravitinoDriverPlugin.java
@@ -44,8 +44,8 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * GravitinoDriverPlugin creates GravitinoCatalogManager to fetch catalogs from Gravitino and
- * register Gravitino catalogs to Spark.
+ * GravitinoDriverPlugin creates GravitinoCatalogManager to fetch catalogs from Apache Gravitino and
+ * register Gravitino catalogs to Apache Spark.
  */
 public class GravitinoDriverPlugin implements DriverPlugin {
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/plugin/GravitinoSparkPlugin.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/plugin/GravitinoSparkPlugin.java
index c0e95cb780f..ee33fd1658e 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/plugin/GravitinoSparkPlugin.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/plugin/GravitinoSparkPlugin.java
@@ -22,7 +22,7 @@
 import org.apache.spark.api.plugin.ExecutorPlugin;
 import org.apache.spark.api.plugin.SparkPlugin;
 
-/** The entrypoint for Gravitino Spark connector. */
+/** The entrypoint for Apache Gravitino Spark connector. */
 public class GravitinoSparkPlugin implements SparkPlugin {
   @Override
diff --git a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/utils/GravitinoTableInfoHelper.java b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/utils/GravitinoTableInfoHelper.java
index 83b9e686bc0..fda5e8355c4 100644
--- a/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/utils/GravitinoTableInfoHelper.java
+++ b/spark-connector/spark-common/src/main/java/com/datastrato/gravitino/spark/connector/utils/GravitinoTableInfoHelper.java
@@ -40,7 +40,7 @@
 
 /**
  * GravitinoTableInfoHelper is a common helper class that is used to retrieve table info from the
- * Gravitino Server
+ * Apache Gravitino Server
  */
 public class GravitinoTableInfoHelper {
diff --git a/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/SparkCommonIT.java b/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/SparkCommonIT.java
index 744aff649ed..c5145d1ec96 100644
--- a/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/SparkCommonIT.java
+++ b/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/SparkCommonIT.java
@@ -122,7 +122,7 @@ private static String getRowLevelDeleteTableSql(
   // database after spark connector support Alter database xx set location command.
   @BeforeAll
   void initDefaultDatabase() throws IOException {
-    // In embedded mode, derby acts as the backend database for the hive metastore
+    // In embedded mode, derby acts as the backend database for the Hive metastore
     // and creates a directory named metastore_db to store metadata,
     // supporting only one connection at a time.
    // Previously, only SparkHiveCatalogIT accessed derby without any exceptions.
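A note on the `PropertiesConverter` hunks earlier in this patch: the interface pivots on the `spark.bypass.` prefix (`SPARK_PROPERTY_PREFIX`), which lets users pass properties straight through to the underlying catalog. The sketch below shows only that prefix-stripping idea under stated assumptions; the class and method names are hypothetical, and only the `spark.bypass.` prefix itself comes from the diff.

```java
import java.util.HashMap;
import java.util.Map;

public class BypassPrefixSketch {
  // The prefix is quoted from the PropertiesConverter hunk; everything else
  // in this class is an illustrative stand-in, not Gravitino's actual API.
  static final String SPARK_PROPERTY_PREFIX = "spark.bypass.";

  // Keep only the prefixed entries and strip the prefix so the remainder can
  // be handed to the catalog-specific configuration.
  static Map<String, String> stripBypassPrefix(Map<String, String> sparkProperties) {
    Map<String, String> result = new HashMap<>();
    for (Map.Entry<String, String> entry : sparkProperties.entrySet()) {
      if (entry.getKey().startsWith(SPARK_PROPERTY_PREFIX)) {
        result.put(entry.getKey().substring(SPARK_PROPERTY_PREFIX.length()), entry.getValue());
      }
    }
    return result;
  }
}
```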
diff --git a/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/SparkEnvIT.java b/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/SparkEnvIT.java
index addd0e7c416..eb29c2d0d3b 100644
--- a/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/SparkEnvIT.java
+++ b/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/SparkEnvIT.java
@@ -89,7 +89,7 @@ void startUp() throws Exception {
     initMetalakeAndCatalogs();
     initSparkEnv();
     LOG.info(
-        "Startup Spark env successfully, gravitino uri: {}, hive metastore uri: {}",
+        "Startup Spark env successfully, Gravitino uri: {}, Hive metastore uri: {}",
         gravitinoUri,
         hiveMetastoreUri);
   }
diff --git a/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogHiveBackendIT.java b/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogHiveBackendIT.java
index 14e9a8b60ea..b731e6dd729 100644
--- a/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogHiveBackendIT.java
+++ b/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogHiveBackendIT.java
@@ -24,7 +24,7 @@
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.TestInstance;
 
-/** This class use Iceberg HiveCatalog for backend catalog. */
+/** This class uses Apache Iceberg HiveCatalog for the backend catalog. */
 @Tag("gravitino-docker-test")
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
 public abstract class SparkIcebergCatalogHiveBackendIT extends SparkIcebergCatalogIT {
diff --git a/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogRestBackendIT.java b/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogRestBackendIT.java
index 862a9a40f6a..b92f6162c15 100644
--- a/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogRestBackendIT.java
+++ b/spark-connector/spark-common/src/test/java/com/datastrato/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogRestBackendIT.java
@@ -24,7 +24,9 @@
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.TestInstance;
 
-/** This class use Iceberg RESTCatalog for test, and the real backend catalog is HiveCatalog. */
+/**
+ * This class uses Apache Iceberg RESTCatalog for testing, and the real backend catalog is HiveCatalog.
+ */
 @Tag("gravitino-docker-test")
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
 public abstract class SparkIcebergCatalogRestBackendIT extends SparkIcebergCatalogIT {
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConfig.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConfig.java
index 86f7647347f..0e950422606 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConfig.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConfig.java
@@ -45,7 +45,7 @@ public class GravitinoConfig {
   public static final String TRINO_CATALOG_STORE_DEFAULT_VALUE = "file";
   public static final String TRINO_CATALOG_MANAGEMENT_DEFAULT_VALUE = "static";
 
-  // The trino configuration of etc/config.properties
+  // The Trino configuration of etc/config.properties
  public static final TrinoConfig trinoConfig = new TrinoConfig();
 
   // Gravitino config keys
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConnector.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConnector.java
index 558b15facfd..65dc1e8694c 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConnector.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConnector.java
@@ -40,8 +40,8 @@
 
 /**
  * GravitinoConnector serves as the entry point for operations on the connector managed by Trino and
- * Gravitino. It provides a standard entry point for Trino connectors and delegates their operations
- * to internal connectors.
+ * Apache Gravitino. It provides a standard entry point for Trino connectors and delegates their
+ * operations to internal connectors.
  */
 public class GravitinoConnector implements Connector {
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConnectorFactory.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConnectorFactory.java
index 16bc16be40f..f8d2dd570b4 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConnectorFactory.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoConnectorFactory.java
@@ -62,13 +62,13 @@ public CatalogConnectorManager getCatalogConnectorManager() {
   }
 
   /**
-   * This function call by trino creates a connector. It creates GravitinoSystemConnector at first.
+   * This function, called by Trino, creates a connector. It creates GravitinoSystemConnector at first.
    * Another time's it get GravitinoConnector by CatalogConnectorManager
    *
    * @param catalogName the connector name of catalog
   * @param requiredConfig the config of connector
-   * @param context trino connector context
-   * @return trino connector
+   * @param context Trino connector context
+   * @return Trino connector
    */
   @Override
   public Connector create(
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoDataSourceProvider.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoDataSourceProvider.java
index 3e6fd3c4532..2d403a3d8a7 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoDataSourceProvider.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoDataSourceProvider.java
@@ -28,7 +28,7 @@
 import io.trino.spi.connector.DynamicFilter;
 import java.util.List;
 
-/** This class provides a ConnectorPageSource for trino read data from internal connector. */
+/** This class provides a ConnectorPageSource for Trino to read data from the internal connector. */
 public class GravitinoDataSourceProvider implements ConnectorPageSourceProvider {
 
   ConnectorPageSourceProvider internalPageSourceProvider;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoMetadata.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoMetadata.java
index 841af75b6f1..d989ed9ddac 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoMetadata.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoMetadata.java
@@ -68,9 +68,9 @@
 import org.apache.commons.lang3.StringUtils;
 
 /**
- * The GravitinoMetadata class provides operations for Gravitino metadata on the Gravitino server.
- * It also transforms the different metadata formats between Trino and Gravitino. Additionally, it
- * wraps the internal connector metadata for accessing data.
+ * The GravitinoMetadata class provides operations for Apache Gravitino metadata on the Gravitino
+ * server. It also transforms the different metadata formats between Trino and Gravitino.
+ * Additionally, it wraps the internal connector metadata for accessing data.
  */
 public class GravitinoMetadata implements ConnectorMetadata {
   // Handling metadata operations on gravitino server
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoPageSinkProvider.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoPageSinkProvider.java
index 44d9a67d839..14bc8400559 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoPageSinkProvider.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoPageSinkProvider.java
@@ -27,7 +27,7 @@
 import io.trino.spi.connector.ConnectorTransactionHandle;
 import org.apache.commons.lang3.NotImplementedException;
 
-/** This class provides a ConnectorPageSink for trino to write data to internal connector. */
+/** This class provides a ConnectorPageSink for Trino to write data to the internal connector. */
 public class GravitinoPageSinkProvider implements ConnectorPageSinkProvider {
 
   ConnectorPageSinkProvider pageSinkProvider;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoRecordSetProvider.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoRecordSetProvider.java
index 268dee0d861..01418c06fe3 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoRecordSetProvider.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoRecordSetProvider.java
@@ -27,7 +27,7 @@
 import io.trino.spi.connector.RecordSet;
 import java.util.List;
 
-/** This class provides a RecordSet for trino read data from internal connector. */
+/** This class provides a RecordSet for Trino to read data from the internal connector. */
 public class GravitinoRecordSetProvider implements ConnectorRecordSetProvider {
 
   ConnectorRecordSetProvider internalRecordSetProvider;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoSplit.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoSplit.java
index 352583a4a57..3b078cb30e9 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoSplit.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoSplit.java
@@ -28,8 +28,8 @@
 import java.util.List;
 
 /**
- * The GravitinoFTransactionHandle is used to make Gravitino metadata operations transactional and
- * wrap the inner connector transaction for data access.
+ * The GravitinoFTransactionHandle is used to make Apache Gravitino metadata operations
+ * transactional and wrap the inner connector transaction for data access.
  */
 public class GravitinoSplit implements ConnectorSplit, GravitinoHandle {
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoSplitSource.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoSplitSource.java
index 5a4b3736151..bcb851049ed 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoSplitSource.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoSplitSource.java
@@ -26,8 +26,8 @@
 import java.util.stream.Collectors;
 
 /**
- * The GravitinoFTransactionHandle is used to make Gravitino metadata operations transactional and
- * wrap the inner connector transaction for data access.
+ * The GravitinoFTransactionHandle is used to make Apache Gravitino metadata operations
+ * transactional and wrap the inner connector transaction for data access.
  */
 public class GravitinoSplitSource implements ConnectorSplitSource {
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoTransactionHandle.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoTransactionHandle.java
index 20a3b5c62e2..2663ade8707 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoTransactionHandle.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/GravitinoTransactionHandle.java
@@ -25,8 +25,8 @@
 import io.trino.spi.connector.ConnectorTransactionHandle;
 
 /**
- * The GravitinoFTransactionHandle is used to make Gravitino metadata operations transactional and
- * wrap the inner connector transaction for data access.
+ * The GravitinoFTransactionHandle is used to make Apache Gravitino metadata operations
+ * transactional and wrap the inner connector transaction for data access.
  */
 public class GravitinoTransactionHandle
     implements ConnectorTransactionHandle, GravitinoHandle {
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorContext.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorContext.java
index 7fbc77e0832..f87ba9a2911 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorContext.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorContext.java
@@ -30,16 +30,16 @@
 import java.util.Map;
 
 /**
- * The CatalogConnector serves as a communication bridge between the Gravitino connector and its
- * internal connectors. It manages the lifecycle, configuration, and runtime environment of internal
- * connectors.
+ * The CatalogConnector serves as a communication bridge between the Apache Gravitino connector and
+ * its internal connectors. It manages the lifecycle, configuration, and runtime environment of
+ * internal connectors.
  */
 public class CatalogConnectorContext {
 
   private final GravitinoCatalog catalog;
   private final GravitinoMetalake metalake;
 
-  // Connector communicates with trino
+  // Connector communicates with Trino
   private final GravitinoConnector connector;
 
   // Internal connector communicates with data storage
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorManager.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorManager.java
index 8ab68a9f7d4..35f760a7407 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorManager.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorManager.java
@@ -53,7 +53,7 @@
  * This class has the following main functions:
 *

- * 1. Load catalogs from the Gravitino server and create
+ * 1. Load catalogs from the Apache Gravitino server and create
  * catalog contexts.
  * 2. Manage all catalog context instances, which primarily handle communication
  * with Trino through Gravitino connectors and inner connectors related to the engine.
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorMetadata.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorMetadata.java
index d972120b7b3..7db804296d1 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorMetadata.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorMetadata.java
@@ -54,7 +54,7 @@
 import java.util.Map;
 import org.apache.commons.lang3.NotImplementedException;
 
-/** This class implements gravitino metadata operators. */
+/** This class implements Apache Gravitino metadata operators. */
 public class CatalogConnectorMetadata {
 
   private static final String CATALOG_DOES_NOT_EXIST_MSG = "Catalog does not exist";
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorMetadataAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorMetadataAdapter.java
index eb5b792f2ef..f9728972012 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorMetadataAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogConnectorMetadataAdapter.java
@@ -70,7 +70,7 @@ public GeneralDataTypeTransformer getDataTypeTransformer() {
     return dataTypeTransformer;
   }
 
-  /** Transform gravitino table metadata to trino ConnectorTableMetadata */
+  /** Transform Gravitino table metadata to Trino ConnectorTableMetadata */
   public ConnectorTableMetadata getTableMetadata(GravitinoTable gravitinoTable) {
     SchemaTableName schemaTableName =
         new SchemaTableName(gravitinoTable.getSchemaName(), gravitinoTable.getName());
@@ -87,7 +87,7 @@ public ConnectorTableMetadata getTableMetadata(GravitinoTable gravitinoTable) {
         Optional.ofNullable(gravitinoTable.getComment()));
   }
 
-  /** Transform trino ConnectorTableMetadata to gravitino table metadata */
+  /** Transform Trino ConnectorTableMetadata to Gravitino table metadata */
   public GravitinoTable createTable(ConnectorTableMetadata tableMetadata) {
     String tableName = tableMetadata.getTableSchema().getTable().getTableName();
     String schemaName = tableMetadata.getTableSchema().getTable().getSchemaName();
@@ -118,12 +118,12 @@ protected Map removeKeys(
         .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
   }
 
-  /** Transform trino schema metadata to gravitino schema metadata */
+  /** Transform Trino schema metadata to Gravitino schema metadata */
   public GravitinoSchema createSchema(String schemaName, Map properties) {
     return new GravitinoSchema(schemaName, toGravitinoSchemaProperties(properties), "");
   }
 
-  /** Transform gravitino column metadata to trino ColumnMetadata */
+  /** Transform Gravitino column metadata to Trino ColumnMetadata */
   public ColumnMetadata getColumnMetadata(GravitinoColumn column) {
     return ColumnMetadata.builder()
         .setName(column.getName())
@@ -135,15 +135,15 @@ public ColumnMetadata getColumnMetadata(GravitinoColumn column) {
         .build();
   }
 
-  /** Transform gravitino table properties to trino ConnectorTableProperties */
+  /** Transform Gravitino table properties to Trino ConnectorTableProperties */
   public ConnectorTableProperties getTableProperties(GravitinoTable table) {
     throw new NotImplementedException();
   }
 
-  /** Normalize gravitino attributes for trino */
+  /** Normalize Gravitino attributes for Trino */
   private Map normalizeProperties(
       Map properties, List> propertyTemplate) {
-    // TODO yuhui redo this function once gravitino table properties are supported..
+    // TODO yuhui redo this function once Gravitino table properties are supported.
     // Trino only supports properties defined in the propertyTemplate.
     Map validProperties = new HashMap<>();
     for (PropertyMetadata propertyMetadata : propertyTemplate) {
@@ -151,33 +151,33 @@ private Map normalizeProperties(
       if (properties.containsKey(name)) {
         validProperties.put(name, properties.get(name));
       } else {
-        LOG.warn("Property {} is not defined in trino, we will ignore it", name);
+        LOG.warn("Property {} is not defined in Trino, we will ignore it", name);
       }
     }
     return validProperties;
   }
 
-  /** Normalize gravitino table attributes for trino */
+  /** Normalize Gravitino table attributes for Trino */
   public Map toTrinoTableProperties(Map properties) {
     return normalizeProperties(properties, tableProperties);
   }
 
-  /** Normalize gravitino schema attributes for trino */
+  /** Normalize Gravitino schema attributes for Trino */
   public Map toTrinoSchemaProperties(Map properties) {
     return normalizeProperties(properties, schemaProperties);
   }
 
-  /** Normalize trino table attributes for gravitino */
+  /** Normalize Trino table attributes for Gravitino */
   public Map toGravitinoTableProperties(Map properties) {
     return removeUnsetProperties(properties);
   }
 
-  /** Normalize trino schema attributes for gravitino */
+  /** Normalize Trino schema attributes for Gravitino */
   public Map toGravitinoSchemaProperties(Map properties) {
     return removeUnsetProperties(properties);
   }
 
-  /** Remove trino unset attributes fro gravitino */
+  /** Remove Trino unset attributes for Gravitino */
   private Map removeUnsetProperties(Map properties) {
     return properties.entrySet().stream()
         .filter(e -> e.getValue() != null)
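The `normalizeProperties` hunk above is the one place in this file where the behavior is worth restating: Trino only accepts properties declared in a `PropertyMetadata` template, so anything outside the template is dropped, and a template property missing from the input is logged and skipped. Below is a self-contained sketch of that filtering rule, with plain strings standing in for Trino's `PropertyMetadata`; the class and method names here are hypothetical.

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PropertyTemplateFilterSketch {
  static Map<String, Object> normalize(
      Map<String, Object> properties, List<String> propertyTemplate) {
    Map<String, Object> validProperties = new HashMap<>();
    for (String name : propertyTemplate) {
      if (properties.containsKey(name)) {
        validProperties.put(name, properties.get(name));
      } else {
        // Mirrors the LOG.warn in the hunk: a template property absent from
        // the input is reported and skipped.
        System.out.printf("Property %s is not defined in Trino, ignoring it%n", name);
      }
    }
    // Properties not named in the template never reach the result map.
    return validProperties;
  }
}
```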
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogRegister.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogRegister.java
index 63b2596774d..9a380aa134f 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogRegister.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/CatalogRegister.java
@@ -44,8 +44,8 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * This class dynamically register the Catalog managed by Gravitino into Trino using Trino CREATE
- * CATALOG statement. It allows the catalog to be used in Trino like a regular Trino catalog.
+ * This class dynamically registers the catalog managed by Apache Gravitino into Trino using the Trino
+ * CREATE CATALOG statement. It allows the catalog to be used in Trino like a regular Trino catalog.
  */
 public class CatalogRegister {
 
@@ -147,7 +147,7 @@ public void registerCatalog(String name, GravitinoCatalog catalog) {
         if (!catalogContents.contains(GRAVITINO_DYNAMIC_CONNECTOR + "=true")) {
           throw new TrinoException(
               GRAVITINO_DUPLICATED_CATALOGS,
-              "Catalog already exists, the catalog is not created by gravitino");
+              "Catalog already exists, the catalog is not created by Gravitino");
         } else {
           throw new TrinoException(
               GRAVITINO_CATALOG_ALREADY_EXISTS,
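For context on what `CatalogRegister` is doing: Trino's dynamic catalog management accepts `CREATE CATALOG ... USING ... WITH (...)` statements, and the hunk above checks for a `GRAVITINO_DYNAMIC_CONNECTOR=true` marker in the resulting catalog contents. Below is a hedged sketch of how such a statement might be assembled; the connector name and the marker property key are hypothetical, since the diff does not show the constant's actual value.

```java
public class CreateCatalogSketch {
  // Hypothetical stand-in for the GRAVITINO_DYNAMIC_CONNECTOR constant
  // referenced in the hunk; the real key is not visible in this diff.
  static final String GRAVITINO_DYNAMIC_CONNECTOR = "gravitino.dynamic-connector";

  static String createCatalogSql(String catalogName, String connectorName) {
    return String.format(
        "CREATE CATALOG \"%s\" USING %s WITH (\"%s\" = 'true')",
        catalogName, connectorName, GRAVITINO_DYNAMIC_CONNECTOR);
  }

  public static void main(String[] args) {
    // Example: register a Hive catalog from the "test" metalake.
    System.out.println(createCatalogSql("test.hive_catalog", "gravitino"));
  }
}
```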
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveCatalogPropertyConverter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveCatalogPropertyConverter.java
index 185feea7b48..fd013976c89 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveCatalogPropertyConverter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveCatalogPropertyConverter.java
@@ -27,7 +27,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/** Convert hive properties between trino and gravitino. */
+/** Convert Apache Hive properties between Trino and Apache Gravitino. */
 public class HiveCatalogPropertyConverter extends PropertyConverter {
 
   public static final Logger LOG = LoggerFactory.getLogger(HiveCatalogPropertyConverter.class);
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveConnectorAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveConnectorAdapter.java
index ea612a60f60..3b6252e8654 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveConnectorAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveConnectorAdapter.java
@@ -31,7 +31,9 @@
 import java.util.List;
 import java.util.Map;
 
-/** Transforming Hive connector configuration and components into Gravitino connector. */
+/**
+ * Transforming Apache Hive connector configuration and components into Apache Gravitino connector.
+ */
 public class HiveConnectorAdapter implements CatalogConnectorAdapter {
 
   private final HasPropertyMeta propertyMetadata;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveDataTypeTransformer.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveDataTypeTransformer.java
index 88d088516d9..ee27e3e2043 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveDataTypeTransformer.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveDataTypeTransformer.java
@@ -26,7 +26,7 @@
 import io.trino.spi.TrinoException;
 import io.trino.spi.type.VarcharType;
 
-/** Type transformer between Hive and Trino */
+/** Type transformer between Apache Hive and Trino */
 public class HiveDataTypeTransformer extends GeneralDataTypeTransformer {
   // Max length of Hive varchar is 65535
   private static final int HIVE_VARCHAR_MAX_LENGTH = 65535;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveMetadataAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveMetadataAdapter.java
index a909e79e03d..6635bf81ee1 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveMetadataAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HiveMetadataAdapter.java
@@ -55,7 +55,7 @@
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.ArrayUtils;
 
-/** Transforming gravitino hive metadata to trino. */
+/** Transforming Apache Gravitino Hive metadata to Trino. */
 public class HiveMetadataAdapter extends CatalogConnectorMetadataAdapter {
 
   private final PropertyConverter tableConverter;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HivePropertyMeta.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HivePropertyMeta.java
index be01f3dee68..7607e8830a8 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HivePropertyMeta.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/hive/HivePropertyMeta.java
@@ -33,7 +33,7 @@
 import io.trino.spi.type.ArrayType;
 import java.util.List;
 
-/** Implementation of {@link HasPropertyMeta} for Hive catalog. */
+/** Implementation of {@link HasPropertyMeta} for Apache Hive catalog. */
 public class HivePropertyMeta implements HasPropertyMeta {
 
   static final String HIVE_SCHEMA_LOCATION = "location";
@@ -151,7 +151,7 @@ enum InsertExistingPartitionsBehavior {
   }
 
   // Hive catalog properties contain '.' and PropertyMetadata does not allow '.'
-  // Those configurations are referred from Trino hive connector
+  // These configurations are taken from the Trino Hive connector
   private static final List<PropertyMetadata<?>> CATALOG_PROPERTY_META =
       ImmutableList.of(
           enumProperty(
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergConnectorAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergConnectorAdapter.java
index e12f1b574c4..3b3173e719e 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergConnectorAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergConnectorAdapter.java
@@ -29,7 +29,10 @@
 import java.util.List;
 import java.util.Map;
 
-/** Transforming Iceberg connector configuration and components into Gravitino connector. */
+/**
+ * Transforming Apache Iceberg connector configuration and components into Apache Gravitino
+ * connector.
+ */
 public class IcebergConnectorAdapter implements CatalogConnectorAdapter {
 
   private final IcebergPropertyMeta propertyMetadata;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergDataTypeTransformer.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergDataTypeTransformer.java
index 701245abb91..431baed7d21 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergDataTypeTransformer.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergDataTypeTransformer.java
@@ -28,7 +28,7 @@
 import io.trino.spi.type.VarbinaryType;
 import io.trino.spi.type.VarcharType;
 
-/** Type transformer between Iceberg and Trino */
+/** Type transformer between Apache Iceberg and Trino */
 public class IcebergDataTypeTransformer extends GeneralDataTypeTransformer {
 
   @Override
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergMetadataAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergMetadataAdapter.java
index 7020add742f..4f609aeec5e 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergMetadataAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/iceberg/IcebergMetadataAdapter.java
@@ -46,7 +46,7 @@
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.ArrayUtils;
 
-/** Transforming gravitino Iceberg metadata to trino. */
+/** Transforming Apache Gravitino Iceberg metadata to Trino. */
 public class IcebergMetadataAdapter extends CatalogConnectorMetadataAdapter {
 
   // Move all this logic to CatalogConnectorMetadataAdapter
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/mysql/MySQLConnectorAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/mysql/MySQLConnectorAdapter.java
index 7e1fb272b0f..fb6a56b5570 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/mysql/MySQLConnectorAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/mysql/MySQLConnectorAdapter.java
@@ -31,7 +31,7 @@
 import java.util.List;
 import java.util.Map;
 
-/** Transforming MySQL connector configuration and components into Gravitino connector. */
+/** Transforming MySQL connector configuration and components into Apache Gravitino connector. */
 public class MySQLConnectorAdapter implements CatalogConnectorAdapter {
 
   private final PropertyConverter catalogConverter;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/mysql/MySQLMetadataAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/mysql/MySQLMetadataAdapter.java
index 2b894087c1e..f0a3b506837 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/mysql/MySQLMetadataAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/mysql/MySQLMetadataAdapter.java
@@ -31,7 +31,7 @@
 import java.util.Map;
 import java.util.Optional;
 
-/** Transforming gravitino MySQL metadata to trino. */
+/** Transforming Apache Gravitino MySQL metadata to Trino. */
 public class MySQLMetadataAdapter extends CatalogConnectorMetadataAdapter {
 
   private final PropertyConverter tableConverter;
 @@ -57,7 +57,7 @@ public Map<String, Object> toTrinoTableProperties(Map<String, String> properties
     return super.toTrinoTableProperties(objectMap);
   }
 
-  /** Transform trino ConnectorTableMetadata to gravitino table metadata */
+  /** Transform Trino ConnectorTableMetadata to Gravitino table metadata */
   @Override
   public GravitinoTable createTable(ConnectorTableMetadata tableMetadata) {
     String tableName = tableMetadata.getTableSchema().getTable().getTableName();
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/postgresql/PostgreSQLConnectorAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/postgresql/PostgreSQLConnectorAdapter.java
index f758bfd3f86..79dd280e377 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/postgresql/PostgreSQLConnectorAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/postgresql/PostgreSQLConnectorAdapter.java
@@ -28,7 +28,9 @@
 import com.datastrato.gravitino.trino.connector.metadata.GravitinoCatalog;
 import java.util.Map;
 
-/** Transforming PostgreSQL connector configuration and components into Gravitino connector. */
+/**
+ * Transforming PostgreSQL connector configuration and components into Apache Gravitino connector.
+ */
 public class PostgreSQLConnectorAdapter implements CatalogConnectorAdapter {
   private final PropertyConverter catalogConverter;
 
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/postgresql/PostgreSQLMetadataAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/postgresql/PostgreSQLMetadataAdapter.java
index b72586a1c0c..45e5e6813ff 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/postgresql/PostgreSQLMetadataAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/jdbc/postgresql/PostgreSQLMetadataAdapter.java
@@ -22,7 +22,7 @@
 import io.trino.spi.session.PropertyMetadata;
 import java.util.List;
 
-/** Transforming gravitino PostgreSQL metadata to trino. */
+/** Transforming Apache Gravitino PostgreSQL metadata to Trino. */
 public class PostgreSQLMetadataAdapter extends CatalogConnectorMetadataAdapter {
 
   public PostgreSQLMetadataAdapter(
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/memory/MemoryConnectorAdapter.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/memory/MemoryConnectorAdapter.java
index a3d3c85b34f..40c09011262 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/memory/MemoryConnectorAdapter.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/catalog/memory/MemoryConnectorAdapter.java
@@ -30,8 +30,8 @@
 import java.util.Map;
 
 /**
- * Support trino Memory connector for testing. Transforming Memory connector configuration and
- * components into Gravitino connector.
+ * Support Trino Memory connector for testing. Transforming Memory connector configuration and
+ * components into Apache Gravitino connector.
  */
 public class MemoryConnectorAdapter implements CatalogConnectorAdapter {
 
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoCatalog.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoCatalog.java
index d98c3b8cb30..4e76092e6ba 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoCatalog.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoCatalog.java
@@ -31,7 +31,7 @@
 import java.util.Map;
 import org.apache.commons.lang3.StringUtils;
 
-/** Help Gravitino connector access CatalogMetadata from gravitino client. */
+/** Helps the Apache Gravitino connector access CatalogMetadata from the Gravitino client. */
 public class GravitinoCatalog {
 
   private static ObjectMapper objectMapper = new ObjectMapper();
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoSchema.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoSchema.java
index ad2b603b5ad..7c7cfa8286b 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoSchema.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoSchema.java
@@ -21,7 +21,7 @@
 import com.datastrato.gravitino.Schema;
 import java.util.Map;
 
-/** Help Gravitino connector access SchemaMetadata from gravitino client. */
+/** Helps the Apache Gravitino connector access SchemaMetadata from the Gravitino client. */
 public class GravitinoSchema {
 
   private final String schemaName;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoTable.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoTable.java
index 5864efa210e..6035180f139 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoTable.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/metadata/GravitinoTable.java
@@ -33,7 +33,7 @@
 import java.util.Map;
 import java.util.Optional;
 
-/** Help Gravitino connector access TableMetadata from gravitino client. */
+/** Helps the Apache Gravitino connector access TableMetadata from the Gravitino client. */
 public class GravitinoTable {
   private final String schemaName;
   private final String tableName;
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/GravitinoSystemConnector.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/GravitinoSystemConnector.java
index aeebfedbf2c..93c6b7d7077 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/GravitinoSystemConnector.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/GravitinoSystemConnector.java
@@ -48,9 +48,9 @@
 
 /**
  * GravitinoSystemConnector is primarily used to drive the GravitinoCatalogManager to load catalog
- * connectors managed in the Gravitino server. After users configure the Gravitino connector through
- * Trino catalog configuration, a GravitinoSystemConnector is initially created. And it provides
- * some system tables and stored procedures of Gravitino connector.
+ * connectors managed in the Apache Gravitino server. After users configure the Gravitino connector
+ * through the Trino catalog configuration, a GravitinoSystemConnector is created first. It also
+ * provides some system tables and stored procedures for the Gravitino connector.
  */
 public class GravitinoSystemConnector implements Connector {
 
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/GravitinoSystemConnectorMetadata.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/GravitinoSystemConnectorMetadata.java
index c73de4ad478..d8acd94a13e 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/GravitinoSystemConnectorMetadata.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/GravitinoSystemConnectorMetadata.java
@@ -36,7 +36,7 @@
 import java.util.Map;
 import java.util.Optional;
 
-/** An implementation of Gravitino System connector Metadata */
+/** An implementation of the Apache Gravitino system connector metadata. */
 public class GravitinoSystemConnectorMetadata implements ConnectorMetadata {
 
   @Override
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/storedprocdure/GravitinoStoredProcedureFactory.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/storedprocdure/GravitinoStoredProcedureFactory.java
index 17749dc18dd..4b0a2d3bdae 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/storedprocdure/GravitinoStoredProcedureFactory.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/system/storedprocdure/GravitinoStoredProcedureFactory.java
 @@ -60,7 +60,7 @@ public Set<Procedure> getStoredProcedures() {
               } catch (Exception e) {
                 throw new TrinoException(
                     GRAVITINO_UNSUPPORTED_OPERATION,
-                    "Failed to initialize gravitino system procedures",
+                    "Failed to initialize Gravitino system procedures",
                     e);
               }
             })
diff --git a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/util/GeneralDataTypeTransformer.java b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/util/GeneralDataTypeTransformer.java
index 8cf11008217..6fa8d1f9861 100644
--- a/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/util/GeneralDataTypeTransformer.java
+++ b/trino-connector/src/main/java/com/datastrato/gravitino/trino/connector/util/GeneralDataTypeTransformer.java
@@ -52,7 +52,7 @@
 import java.util.Optional;
 import java.util.stream.Collectors;
 
-/** This class is used to transform datatype between gravitino and trino */
+/** This class is used to transform data types between Apache Gravitino and Trino. */
 public class GeneralDataTypeTransformer {
 
   public Type getTrinoType(com.datastrato.gravitino.rel.types.Type type) {
diff --git a/web/README.md b/web/README.md
index 2612ea4b95b..f36af776a28 100644
--- a/web/README.md
+++ b/web/README.md
@@ -17,11 +17,11 @@
   under the License.
 -->
 
-# Gravitino Web UI
+# Apache Gravitino Web UI
 
 > **ℹ️ Tips**
 >
-> Under normal circumstances, you only need to visit [http://localhost:8090](http://localhost:8090) if you're just using the web UI to manage the Gravitino.
+> Under normal circumstances, you only need to visit [http://localhost:8090](http://localhost:8090) if you're just using the web UI to manage Apache Gravitino.
 >
 > You don't need to start the web development mode. If you need to modify the web part of the code, you can refer to the following document content for development and testing.
 

From e17a910c94d5462e705a0870569b1252be14bd50 Mon Sep 17 00:00:00 2001
From: Justin Mclean 
Date: Thu, 4 Jul 2024 16:20:21 +1000
Subject: [PATCH 05/12] [#4074] Add work in progress disclaimer. (#4076)

### What changes were proposed in this pull request?

Add work in progress disclaimer.

### Why are the changes needed?

Required by ASF incubator policy.

Fix: #4074

### Does this PR introduce _any_ user-facing change?

N/A

### How was this patch tested?

N/A
---
 DISCLAIMER_WIP.txt | 6 ++++++
 build.gradle.kts   | 2 ++
 2 files changed, 8 insertions(+)
 create mode 100644 DISCLAIMER_WIP.txt

diff --git a/DISCLAIMER_WIP.txt b/DISCLAIMER_WIP.txt
new file mode 100644
index 00000000000..c285b1214e6
--- /dev/null
+++ b/DISCLAIMER_WIP.txt
@@ -0,0 +1,6 @@
+Apache Gravitino is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision-making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
+
+Some of the incubating project’s releases may not be fully compliant with ASF policy and, while we have documented the licensing of all code in detail, we know that currently our release may:
+- Contain code that may not be compatible with the Apache License
+
+If you are planning to incorporate this work into your product/project, please be aware that you will need to conduct a thorough licensing review to determine the overall implications of including this work. For the current status of this project through the Apache Incubator, visit: https://incubator.apache.org/projects/gravitino.html
\ No newline at end of file
diff --git a/build.gradle.kts b/build.gradle.kts
index 32e13d72cb9..abba4ee2a98 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -484,6 +484,8 @@ tasks.rat {
     "web/src/lib/icons/svg/**/*.svg",
     "**/LICENSE.*",
     "**/NOTICE.*",
+    "DISCLAIMER_WIP.txt",
+    "DISCLAIMER.txt",
     "ROADMAP.md",
     "clients/client-python/.pytest_cache/*",
     "clients/client-python/gravitino.egg-info/*",

From e61422e47781ed9e3e1f98b6caa148e254314ade Mon Sep 17 00:00:00 2001
From: JinsYin 
Date: Thu, 4 Jul 2024 15:24:16 +0800
Subject: [PATCH 06/12] [#4077] improvement(docs): Fixed an incorrect Java
 example (#4079)



### What changes were proposed in this pull request?

Fixed an incorrect Java example.

### Why are the changes needed?

Fix: #4077

### Does this PR introduce _any_ user-facing change?

No

### How was this patch tested?

No testing required

Co-authored-by: rqyin 
---
 docs/manage-metalake-using-gravitino.md            | 8 ++++----
 docs/manage-relational-metadata-using-gravitino.md | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/manage-metalake-using-gravitino.md b/docs/manage-metalake-using-gravitino.md
index ff4e76a029a..b4510cfc2c8 100644
--- a/docs/manage-metalake-using-gravitino.md
+++ b/docs/manage-metalake-using-gravitino.md
@@ -42,7 +42,7 @@ GravitinoAdminClient gravitinoAdminClient = GravitinoAdminClient
     .builder("http://localhost:8090")
     .build();
 
-GravitinoMetaLake newMetalake = gravitinoAdminClient.createMetalake(
+GravitinoMetalake newMetalake = gravitinoAdminClient.createMetalake(
     NameIdentifier.of("metalake"),
     "This is a new metalake",
     new HashMap<>());
@@ -79,7 +79,7 @@ curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \
 
 ```java
 // ...
-GravitinoMetaLake loaded = gravitinoAdminClient.loadMetalake(
+GravitinoMetalake loaded = gravitinoAdminClient.loadMetalake(
     NameIdentifier.of("metalake"));
 // ...
 ```
@@ -123,7 +123,7 @@ curl -X PUT -H "Accept: application/vnd.gravitino.v1+json" \
 
 ```java
 // ...
-GravitinoMetaLake renamed = gravitinoAdminClient.alterMetalake(
+GravitinoMetalake renamed = gravitinoAdminClient.alterMetalake(
     NameIdentifier.of("new_metalake"),
     MetalakeChange.rename("new_metalake_renamed")
 );
@@ -213,7 +213,7 @@ curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \
 
 ```java
 // ...
-GravitinoMetaLake[] allMetalakes = gravitinoAdminClient.listMetalakes();
+GravitinoMetalake[] allMetalakes = gravitinoAdminClient.listMetalakes();
 // ...
 ```
 
diff --git a/docs/manage-relational-metadata-using-gravitino.md b/docs/manage-relational-metadata-using-gravitino.md
index 47003de481e..372d2e40ca8 100644
--- a/docs/manage-relational-metadata-using-gravitino.md
+++ b/docs/manage-relational-metadata-using-gravitino.md
@@ -245,7 +245,7 @@ http://localhost:8090/api/metalakes/metalake/catalogs?details=true
 ```java
 // ...
 // Assuming you have just created a metalake named `metalake`
-Catalog[] catalogsInfos = gravitinoMetaLake.listCatalogsInfo(Namespace.ofCatalog("metalake"));
+Catalog[] catalogsInfos = gravitinoMetalake.listCatalogsInfo(Namespace.ofCatalog("metalake"));
 // ...
 ```
 

From 64e4189762e77d99e0bef425d6a75b56cb62bb9d Mon Sep 17 00:00:00 2001
From: danhuawang <154112360+danhuawang@users.noreply.github.com>
Date: Fri, 5 Jul 2024 09:37:12 +0800
Subject: [PATCH 07/12] [#4078] fix(CI): apt purge pops error "E: Unable to
 locate package" on some Ubuntu VMs (#4081)

### What changes were proposed in this pull request?
Fix issue:
```
sudo apt purge -y \
    firefox \
    google-chrome-stable \
    microsoft-edge-stable
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
E: Unable to locate package google-chrome-stable
E: Unable to locate package microsoft-edge-stable
```
### Why are the changes needed?

Fix: #4078

### Does this PR introduce _any_ user-facing change?

N/A

### How was this patch tested?

CI
---
 dev/ci/util_free_space.sh | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/dev/ci/util_free_space.sh b/dev/ci/util_free_space.sh
index 5ff4a22b449..c7ab15b6402 100755
--- a/dev/ci/util_free_space.sh
+++ b/dev/ci/util_free_space.sh
@@ -50,9 +50,14 @@ if [ "${GITHUB_ACTIONS}" = "true" ]; then
   # 376MB
   sudo rm -rf /opt/hostedtoolcache/node || :
   # Remove Web browser packages
-  sudo apt purge -y \
-    firefox \
-    google-chrome-stable \
-    microsoft-edge-stable
+  if dpkg-query -l firefox; then
+    sudo apt purge -y firefox
+  fi
+  if dpkg-query -l google-chrome-stable; then
+    sudo apt purge -y google-chrome-stable
+  fi
+  if dpkg-query -l microsoft-edge-stable; then
+    sudo apt purge -y microsoft-edge-stable
+  fi
   df -h
 fi
\ No newline at end of file

From 2133d95ac86918c9345e9a2440fc646c0fbbe0d3 Mon Sep 17 00:00:00 2001
From: Justin Mclean 
Date: Fri, 5 Jul 2024 13:05:58 +1000
Subject: [PATCH 08/12] [#4073] Update policies to be in line with ASF policy.
 (#4080)

### What changes were proposed in this pull request?

Update project's current policies to be in line with ASF policy.

### Why are the changes needed?

To comply with ASF policy.

Fix: #4073

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Built locally.
---
 CODE_OF_CONDUCT.md |   2 +-
 CONTRIBUTING.md    |   2 +-
 GOVERNANCE.md      |  30 +------------
 MAINTAINERS.md     | 106 ++++++++++++++++++++++++---------------------
 SECURITY.md        |  27 +-----------
 5 files changed, 61 insertions(+), 106 deletions(-)

diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index c83f1d12e38..7c9052a606f 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -58,7 +58,7 @@ Examples of representing our community include using an official e-mail address,
 
 ## Enforcement
 
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at .
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at .
 All complaints will be reviewed and investigated promptly and fairly.
 All community leaders are obligated to respect the privacy and security of the reporter of any incident.
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 028b782bec9..c4c83b6f669 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -281,7 +281,7 @@ All text files should use macOS/unix style line endings (LF) not windows style l
 
 ## Community and communication
 
-Join the [community discourse group](https://gravitino.discourse.group) to discuss ideas and seek help. You are also encouraged to use GitHub discussions and follow Datastrato on social media to stay updated on project news.
+Join the [community mailing list](https://lists.apache.org/list.html?dev@gravitino.apache.org) to discuss ideas and seek help. You are also encouraged to use GitHub discussions.
 
 ## License
 
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
index 5d28e44b439..37418d6080e 100644
--- a/GOVERNANCE.md
+++ b/GOVERNANCE.md
@@ -19,32 +19,4 @@
 
 # Governance Policy
 
-This document provides the governance policy for the project. Maintainers agree to this policy and to follow all project polices by adding their name to the [maintainers.md file](./MAINTAINERS.md).
-
-## 1. Roles
-
-This project includes the following roles.
-
-**1.1. Maintainers**. Maintainers oversee the development, maintenance, and updates of the project, and play a role in consensus decision-making. The addition or removal of Maintainers requires approval from the existing Maintainers.
-
-**1.2. Contributors**. Contributors are individuals who have made contributions to the project.
-
-## 2. Decisions
-
-**2.1. Consensus-Based Decision Making**. Decisions in projects are reached through consensus. Although unanimous agreement is preferred, it's not required.
-
-## 3. How We Work
-
-**3.1. Openness**. Anyone can participate in the project, and there should be minimal barriers to entry.
-
-**3.2. Balance**. The development process should balance the interests of all stakeholders.
-
-**3.3. Harmonization**. Good-faith efforts shall be made to resolve any conflicts.
-
-## 4. Trademarks
-
-Any names, trademarks or logos of the project may only be used if they indicate the project's source.
-
-## 5. Amendments
-
-Amendments to this governance policy may be made by approval of the Maintainers.
+The Apache Gravitino project follows the standard [ASF governance model](https://www.apache.org/foundation/governance/), [ASF policies](https://www.apache.org/foundation/policies/), and [ASF Incubator policies](https://incubator.apache.org/policy/incubation.html).
\ No newline at end of file
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index ceb2505846a..f9b693345dc 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -17,58 +17,11 @@
   under the License.
 -->
 
-This document lists the maintainers and contributors of the Project.
+Note: The maintainer and contributor tables at the end of this document list the maintainers and contributors of the project before it became an ASF project; they are no longer updated.
 
-# Maintainers
+# Committers
 
-Maintainers may be added once approved by the existing maintainers (see [Governance document](GOVERNANCE.md)). By adding your name to this list you agree to follow the project's guidelines and governance including the [Code of Conduct](CODE-OF-CONDUCT.md) and that you have permission to contribute from your employer. All contributions are licensed under the Apache License version 2.
-
-| **NAME**      | **GitHub Username** | **Organization** |
-|---------------|---------------------|------------------|
-| Justin Mclean | justinmclean        | Datastrato       |
-| He Qi         | qqqttt123           | Datastrato       |
-| Minghuang Li  | mchades             | Datastrato       |
-| Xun Liu       | xunliu              | Datastrato       |
-| Hui Yu        | diqiu50             | Datastrato       |
-| Xiaojing Fang | FANNG1              | Datastrato       |
-| Qi Yu         | yuqi1129            | Datastrato       |
-| Decheng Liu   | ch3yne              | Datastrato       |
-| Saisai Shao   | jerryshao           | Datastrato       |
-| Shaofeng Shi  | shaofengshi         | Datastrato       |
-| Lisa Cao      | lisancao            | Datastrato       |
-| Qian Xia      | LauraXia123         | Datastrato       |
-| Danhua Wang   | danhuawang          | Datastrato       |
-
-# Contributors
-
-Contributors may be added by existing maintainers (see [Governance document](GOVERNANCE.md)). By adding your name to this list you agree to follow the project's guidelines and governance including the [Code of Conduct](CODE-OF-CONDUCT.md) and that you have permission to contribute from your employer. All contributions are licensed under the Apache License version 2.
-
-| **NAME**       | **GitHub Username** | **Organization** |
-|----------------|---------------------|------------------|
-| Kuan-Po Tseng  | brandboat           | SUSE             |
-| Nicholas Jiang | SteNicholas         | Bilibili         |
-| Eric Chang     | unknowntpo          | Lawsnote         |
-| Sophie Sun     | SophieTech88        | ExtraHop Network |
-| Xing Yong      | YxAc                | Xiaomi           |
-| Liwei Yang     | lw-yang             | Xiaomi           |
-| Yu-Ting Wang   | noidname01          | opensource4you   |
-| Ziva Li        | zivali              | Yahoo            |
-| Kang Zhou      | zhoukangcn          | Xiaomi           |
-| Han Zhang      | xiaozcy             | Xiaomi           |
-| Yu-Hsin Lai    | laiyousin           | Virginia Tech    |
-| Charlie Cheng  | charliecheng630     | cacaFly          |
-| PoAn Yang      | FrankYang0592       | SUSE             |
-| Congling Xia   | xiacongling         | Xiaomi           |
-| JieBao Xiao    | xloya               | Xiaomi           |
-| Can Cai        | caican00            | Xiaomi           |
-| Peidian Li     | coolderli           | Xiaomi           |
-| Brandon Lu     | Lanznx              | LINE             |
-| Lewis Jackson  | xnge                | opensource4you   |
-| Li-Hsing Liu   | austin362667        | opensource4you   |
-| Tianhang Li    | TEOTEO520           | Bilibili         |
-| Hiren Sharma   | hiirrxnn            | opensource4you   |
-| Chun-Hung Tseng| henrybear327        | opensource4you   |
-| Carl Chang     | ichuniq             | opensource4you   |
+The current Apache Gravitino project committers can be [viewed here](https://projects.apache.org/project.html?incubator-gravitino).
 
 ## Review process
 
@@ -131,3 +84,56 @@ For those bug fixes we should not backport:
    that could introduce new bugs.
 2. The bug being addressed is low priority or not critical to the community.
 3. The backported fix varies widely from the main branch fix.
+
+# Pre-ASF Maintainers
+
+Maintainers of the project are now called committers.
+
+All contributions from the people listed are licensed under the Apache License version 2.
+
+| **NAME**      | **GitHub Username** | **Organization** |
+|---------------|---------------------|------------------|
+| Justin Mclean | justinmclean        | Datastrato       |
+| He Qi         | qqqttt123           | Datastrato       |
+| Minghuang Li  | mchades             | Datastrato       |
+| Xun Liu       | xunliu              | Datastrato       |
+| Hui Yu        | diqiu50             | Datastrato       |
+| Xiaojing Fang | FANNG1              | Datastrato       |
+| Qi Yu         | yuqi1129            | Datastrato       |
+| Decheng Liu   | ch3yne              | Datastrato       |
+| Saisai Shao   | jerryshao           | Datastrato       |
+| Shaofeng Shi  | shaofengshi         | Datastrato       |
+| Lisa Cao      | lisancao            | Datastrato       |
+| Qian Xia      | LauraXia123         | Datastrato       |
+| Danhua Wang   | danhuawang          | Datastrato       |
+
+# Pre-ASF Contributors
+
+All contributions from the people listed are licensed under the Apache License version 2.
+
+| **NAME**       | **GitHub Username** | **Organization** |
+|----------------|---------------------|------------------|
+| Kuan-Po Tseng  | brandboat           | SUSE             |
+| Nicholas Jiang | SteNicholas         | Bilibili         |
+| Eric Chang     | unknowntpo          | Lawsnote         |
+| Sophie Sun     | SophieTech88        | ExtraHop Network |
+| Xing Yong      | YxAc                | Xiaomi           |
+| Liwei Yang     | lw-yang             | Xiaomi           |
+| Yu-Ting Wang   | noidname01          | opensource4you   |
+| Ziva Li        | zivali              | Yahoo            |
+| Kang Zhou      | zhoukangcn          | Xiaomi           |
+| Han Zhang      | xiaozcy             | Xiaomi           |
+| Yu-Hsin Lai    | laiyousin           | Virginia Tech    |
+| Charlie Cheng  | charliecheng630     | cacaFly          |
+| PoAn Yang      | FrankYang0592       | SUSE             |
+| Congling Xia   | xiacongling         | Xiaomi           |
+| JieBao Xiao    | xloya               | Xiaomi           |
+| Can Cai        | caican00            | Xiaomi           |
+| Peidian Li     | coolderli           | Xiaomi           |
+| Brandon Lu     | Lanznx              | LINE             |
+| Lewis Jackson  | xnge                | opensource4you   |
+| Li-Hsing Liu   | austin362667        | opensource4you   |
+| Tianhang Li    | TEOTEO520           | Bilibili         |
+| Hiren Sharma   | hiirrxnn            | opensource4you   |
+| Chun-Hung Tseng| henrybear327        | opensource4you   |
+| Carl Chang     | ichuniq             | opensource4you   |
diff --git a/SECURITY.md b/SECURITY.md
index c24077d17c7..9d80d94619b 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -19,29 +19,6 @@
 
 # Security Policy
 
-## Reporting a Security Issue
+The Apache Gravitino project follows the standard processes as set out by the [ASF security team](https://www.apache.org/security/).
 
-If you discover a security issue in our software, please report it to us immediately. We take security seriously and will respond to verified issues as quickly as possible.
-
-**To report a security issue:**
-
-1. Email us at [security@datastrato.com](mailto:security@datastrato.com) with a detailed description of the issue. Please do not disclose the issue publicly until we have had a chance to address it.
-
-2. We will acknowledge your report within 3 working days and work to investigate and resolve the issue.
-
-## Responsible Disclosure
-
-We appreciate your help in responsibly disclosing security issues to us. We follow the principles of responsible disclosure, and we ask that you:
-
-- Do not publicly disclose the issue until we have had a chance to address it.
-- Provide us with a reasonable amount of time to address and fix the issue before making it public.
-
-## Our Commitment
-
-We will make every effort to resolve security issues in a timely manner and keep you informed of our progress.
-
-## Security Updates
-
-We will post updates regarding security incidents or resolutions.
-
-Thank you for your help in keeping our software secure.
+Please report any security issues to [private@gravitino.apache.org](mailto:private@gravitino.apache.org) or to [security@apache.org](mailto:security@apache.org).

From 45bb443fb75945a71db7d2766402403de1dad9d3 Mon Sep 17 00:00:00 2001
From: Kang 
Date: Fri, 5 Jul 2024 11:11:24 +0800
Subject: [PATCH 09/12] [MINOR] fix: remove chmod in Dockerfile to decrease
 image size (#4082)

### What changes were proposed in this pull request?

Decrease the size of the Docker image in ARM arch.

### Why are the changes needed?

Running chmod makes the Docker image grow to 8.89GB, but it isn't
necessary for the root user.

### Does this PR introduce _any_ user-facing change?

N/A

### How was this patch tested?

Manual/CI
---
 catalogs/catalog-jdbc-doris/build.gradle.kts | 2 +-
 dev/docker/doris/Dockerfile                  | 3 +--
 docs/docker-image-details.md                 | 2 ++
 integration-test/build.gradle.kts            | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/catalogs/catalog-jdbc-doris/build.gradle.kts b/catalogs/catalog-jdbc-doris/build.gradle.kts
index 65d6e0d582c..0d01a5cc03f 100644
--- a/catalogs/catalog-jdbc-doris/build.gradle.kts
+++ b/catalogs/catalog-jdbc-doris/build.gradle.kts
@@ -84,7 +84,7 @@ tasks {
 tasks.test {
   val skipUTs = project.hasProperty("skipTests")
   doFirst {
-    environment("GRAVITINO_CI_DORIS_DOCKER_IMAGE", "datastrato/gravitino-ci-doris:0.1.4")
+    environment("GRAVITINO_CI_DORIS_DOCKER_IMAGE", "datastrato/gravitino-ci-doris:0.1.5")
   }
 
   if (skipUTs) {
diff --git a/dev/docker/doris/Dockerfile b/dev/docker/doris/Dockerfile
index 2831bfae1dd..bcdc343b2fb 100644
--- a/dev/docker/doris/Dockerfile
+++ b/dev/docker/doris/Dockerfile
@@ -56,8 +56,7 @@ RUN ARCH=$(uname -m) && \
 #################################################################################
 ## add files
 ADD packages/doris-${TARGETARCH}.tar.xz /opt/
-RUN ln -s /opt/apache-doris-${DORIS_VERSION}-bin-* ${DORIS_HOME} && \
-    chmod 755 "${DORIS_BE_HOME}/lib/doris_be"
+RUN ln -s /opt/apache-doris-${DORIS_VERSION}-bin-* ${DORIS_HOME}
 
 COPY start.sh ${DORIS_HOME}
 
diff --git a/docs/docker-image-details.md b/docs/docker-image-details.md
index cfa0a81162f..f7b30a2d6f8 100644
--- a/docs/docker-image-details.md
+++ b/docs/docker-image-details.md
@@ -215,6 +215,8 @@ Changelog
 You can use this image to test Apache Doris.
 
 Changelog
+- gravitino-ci-doris:0.1.5
+  - Remove the chmod command in the Dockerfile to decrease the size of the Docker image.
 
 - gravitino-ci-doris:0.1.4
   - remove chmod in start.sh to accelerate the startup speed
diff --git a/integration-test/build.gradle.kts b/integration-test/build.gradle.kts
index af26f0bc1bb..016142e0be3 100644
--- a/integration-test/build.gradle.kts
+++ b/integration-test/build.gradle.kts
@@ -160,7 +160,7 @@ tasks.test {
       environment("GRAVITINO_CI_HIVE_DOCKER_IMAGE", "datastrato/gravitino-ci-hive:0.1.12")
       environment("GRAVITINO_CI_TRINO_DOCKER_IMAGE", "datastrato/gravitino-ci-trino:0.1.5")
       environment("GRAVITINO_CI_KAFKA_DOCKER_IMAGE", "apache/kafka:3.7.0")
-      environment("GRAVITINO_CI_DORIS_DOCKER_IMAGE", "datastrato/gravitino-ci-doris:0.1.4")
+      environment("GRAVITINO_CI_DORIS_DOCKER_IMAGE", "datastrato/gravitino-ci-doris:0.1.5")
       environment("GRAVITINO_CI_RANGER_DOCKER_IMAGE", "datastrato/gravitino-ci-ranger:0.1.0")
 
       copy {

From 4312b632b225b5a5a2ea6e0f0bdcbdd3092a1db8 Mon Sep 17 00:00:00 2001
From: xloya <982052490@qq.com>
Date: Fri, 5 Jul 2024 11:26:27 +0800
Subject: [PATCH 10/12] [#3760] improvement(client-python): Add Docker env and
 PyGVFS Integration tests (#3876)

### What changes were proposed in this pull request?

Add Hive Docker env for client-python, and add integration tests for
PyGVFS + HDFS. Depends on #3528.

### Why are the changes needed?

Fix: #3760

### How was this patch tested?

Add some ITs.
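
For reviewers, a rough sketch of how the new helpers are meant to compose in a test. The class and method names come from the files added below, but the import paths, the env-variable plumbing (the helpers expect HADOOP_VERSION, PYTHON_BUILD_PATH, and GRAVITINO_CI_HIVE_DOCKER_IMAGE, normally exported by the Gradle build), and the test body itself are illustrative assumptions, not code from this patch:

```python
import unittest

# Helpers added in this patch under tests/integration; import paths assumed.
from base_hadoop_env import BaseHadoopEnvironment
from hdfs_container import HDFSContainer


class ExampleHdfsEnvTest(unittest.TestCase):
    """Illustrative only: boot the CI HDFS container, wire up the local
    Hadoop client environment, and tear both down afterwards."""

    hdfs_container = None

    @classmethod
    def setUpClass(cls):
        # Starts (or reuses) the "python-hdfs" container and waits until
        # HDFS reports healthy.
        cls.hdfs_container = HDFSContainer()
        # Unpacks the Hadoop tarball fetched by the Gradle build and exports
        # HADOOP_HOME, HADOOP_CONF_DIR and CLASSPATH for the gvfs client.
        BaseHadoopEnvironment.init_hadoop_env()

    @classmethod
    def tearDownClass(cls):
        BaseHadoopEnvironment.clear_hadoop_env()
        cls.hdfs_container.close()

    def test_container_has_ip(self):
        # The container joins the "python-net" bridge network created by
        # HDFSContainer, so it should have an address assigned.
        self.assertTrue(self.hdfs_container.get_ip())


if __name__ == "__main__":
    unittest.main()
```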

---------

Co-authored-by: xiaojiebao 
---
 .github/workflows/python-integration-test.yml |   2 +-
 clients/client-python/build.gradle.kts        |  55 +-
 clients/client-python/requirements-dev.txt    |   3 +-
 .../tests/integration/base_hadoop_env.py      | 101 +++
 .../tests/integration/hdfs_container.py       | 158 ++++
 .../tests/integration/integration_test_env.py |  86 +++
 .../tests/integration/test_gvfs_with_hdfs.py  | 704 ++++++++++++++++++
 .../integration/test_simple_auth_client.py    |   4 +-
 8 files changed, 1098 insertions(+), 15 deletions(-)
 create mode 100644 clients/client-python/tests/integration/base_hadoop_env.py
 create mode 100644 clients/client-python/tests/integration/hdfs_container.py
 create mode 100644 clients/client-python/tests/integration/test_gvfs_with_hdfs.py

diff --git a/.github/workflows/python-integration-test.yml b/.github/workflows/python-integration-test.yml
index f2e5fd4edd4..a7ffacfd72a 100644
--- a/.github/workflows/python-integration-test.yml
+++ b/.github/workflows/python-integration-test.yml
@@ -66,7 +66,7 @@ jobs:
           for pythonVersion in "3.8" "3.9" "3.10" "3.11"
           do
             echo "Use Python version ${pythonVersion} to test the Python client."
-            ./gradlew -PjdkVersion=${{ matrix.java-version }} -PpythonVersion=${pythonVersion} :clients:client-python:test
+            ./gradlew -PjdkVersion=${{ matrix.java-version }} -PpythonVersion=${pythonVersion} -PskipDockerTests=false :clients:client-python:test
             # Clean Gravitino database to clean test data
             rm -rf ./distribution/package/data
           done
diff --git a/clients/client-python/build.gradle.kts b/clients/client-python/build.gradle.kts
index 68cc897e595..2cf83c37614 100644
--- a/clients/client-python/build.gradle.kts
+++ b/clients/client-python/build.gradle.kts
@@ -16,12 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+import de.undercouch.gradle.tasks.download.Download
+import de.undercouch.gradle.tasks.download.Verify
 import io.github.piyushroshan.python.VenvTask
 import java.net.HttpURLConnection
 import java.net.URL
 
 plugins {
   id("io.github.piyushroshan.python-gradle-miniforge-plugin") version "1.0.0"
+  id("de.undercouch.download") version "5.6.0"
 }
 
 pythonPlugin {
@@ -148,6 +151,10 @@ fun generatePypiProjectHomePage() {
   }
 }
 
+val hadoopVersion = "2.7.3"
+val hadoopPackName = "hadoop-${hadoopVersion}.tar.gz"
+val hadoopDirName = "hadoop-${hadoopVersion}"
+val hadoopDownloadUrl = "https://archive.apache.org/dist/hadoop/core/hadoop-${hadoopVersion}/${hadoopPackName}"
 tasks {
   val pipInstall by registering(VenvTask::class) {
     venvExec = "pip"
@@ -173,6 +180,26 @@ tasks {
     workingDir = projectDir.resolve("./tests/integration")
   }
 
+  val build by registering(VenvTask::class) {
+    dependsOn(pylint)
+    venvExec = "python"
+    args = listOf("scripts/generate_version.py")
+  }
+
+  val downloadHadoopPack by registering(Download::class) {
+    dependsOn(build)
+    onlyIfModified(true)
+    src(hadoopDownloadUrl)
+    dest(layout.buildDirectory.dir("tmp"))
+  }
+
+  val verifyHadoopPack by registering(Verify::class) {
+    dependsOn(downloadHadoopPack)
+    src(layout.buildDirectory.file("tmp/${hadoopPackName}"))
+    algorithm("MD5")
+    checksum("3455bb57e4b4906bbea67b58cca78fa8")
+  }
+
   val integrationTest by registering(VenvTask::class) {
     doFirst {
       gravitinoServer("start")
@@ -181,11 +208,23 @@ tasks {
     venvExec = "coverage"
     args = listOf("run", "--branch", "-m", "unittest")
     workingDir = projectDir.resolve("./tests/integration")
-    environment = mapOf(
-      "PROJECT_VERSION" to project.version,
-      "GRAVITINO_HOME" to project.rootDir.path + "/distribution/package",
-      "START_EXTERNAL_GRAVITINO" to "true"
-    )
+    val dockerTest = project.rootProject.extra["dockerTest"] as? Boolean ?: false
+    val envMap = mutableMapOf<String, Any>()
+    if (dockerTest) {
+      dependsOn("verifyHadoopPack")
+      envMap.putAll(mapOf(
+          "HADOOP_VERSION" to hadoopVersion,
+          "PYTHON_BUILD_PATH" to project.rootDir.path + "/clients/client-python/build"
+      ))
+    }
+    envMap.putAll(mapOf(
+        "PROJECT_VERSION" to project.version,
+        "GRAVITINO_HOME" to project.rootDir.path + "/distribution/package",
+        "START_EXTERNAL_GRAVITINO" to "true",
+        "DOCKER_TEST" to dockerTest.toString(),
+        "GRAVITINO_CI_HIVE_DOCKER_IMAGE" to "datastrato/gravitino-ci-hive:0.1.12",
+    ))
+    environment = envMap
 
     doLast {
       gravitinoServer("stop")
@@ -224,12 +263,6 @@ tasks {
     }
   }
 
-  val build by registering(VenvTask::class) {
-    dependsOn(pylint)
-    venvExec = "python"
-    args = listOf("scripts/generate_version.py")
-  }
-
   val pydoc by registering(VenvTask::class) {
     venvExec = "python"
     args = listOf("scripts/generate_doc.py")
diff --git a/clients/client-python/requirements-dev.txt b/clients/client-python/requirements-dev.txt
index 77387c01c6c..06f63435837 100644
--- a/clients/client-python/requirements-dev.txt
+++ b/clients/client-python/requirements-dev.txt
@@ -26,4 +26,5 @@ pyarrow==15.0.2
 llama-index==0.10.40
 tenacity==8.3.0
 cachetools==5.3.3
-readerwriterlock==1.0.9
\ No newline at end of file
+readerwriterlock==1.0.9
+docker==7.1.0
\ No newline at end of file
diff --git a/clients/client-python/tests/integration/base_hadoop_env.py b/clients/client-python/tests/integration/base_hadoop_env.py
new file mode 100644
index 00000000000..694331078ff
--- /dev/null
+++ b/clients/client-python/tests/integration/base_hadoop_env.py
@@ -0,0 +1,101 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+"""
+
+import logging
+import os
+import shutil
+import subprocess
+import tarfile
+
+from gravitino.exceptions.gravitino_runtime_exception import GravitinoRuntimeException
+
+logger = logging.getLogger(__name__)
+
+HADOOP_VERSION = os.environ.get("HADOOP_VERSION")
+PYTHON_BUILD_PATH = os.environ.get("PYTHON_BUILD_PATH")
+
+
+class BaseHadoopEnvironment:
+
+    @classmethod
+    def init_hadoop_env(cls):
+        cls._unzip_hadoop_pack()
+        # configure hadoop env
+        cls._configure_hadoop_environment()
+
+    @classmethod
+    def clear_hadoop_env(cls):
+        try:
+            shutil.rmtree(f"{PYTHON_BUILD_PATH}/hadoop")
+        except Exception as e:
+            raise GravitinoRuntimeException(
+                f"Failed to delete dir '{PYTHON_BUILD_PATH}/hadoop': {e}"
+            ) from e
+
+    @classmethod
+    def _unzip_hadoop_pack(cls):
+        hadoop_pack = f"{PYTHON_BUILD_PATH}/tmp/hadoop-{HADOOP_VERSION}.tar.gz"
+        unzip_dir = f"{PYTHON_BUILD_PATH}/hadoop"
+        logger.info("Unzip hadoop pack from %s.", hadoop_pack)
+        # unzip the pack
+        if os.path.exists(unzip_dir):
+            try:
+                shutil.rmtree(unzip_dir)
+            except Exception as e:
+                raise GravitinoRuntimeException(
+                    f"Failed to delete dir '{unzip_dir}': {e}"
+                ) from e
+        try:
+            with tarfile.open(hadoop_pack) as tar:
+                tar.extractall(path=unzip_dir)
+        except Exception as e:
+            raise GravitinoRuntimeException(
+                f"Failed to extract file '{hadoop_pack}': {e}"
+            ) from e
+
+    @classmethod
+    def _configure_hadoop_environment(cls):
+        logger.info("Configure hadoop environment.")
+        os.putenv("HADOOP_USER_NAME", "datastrato")
+        os.putenv("HADOOP_HOME", f"{PYTHON_BUILD_PATH}/hadoop/hadoop-{HADOOP_VERSION}")
+        os.putenv(
+            "HADOOP_CONF_DIR",
+            f"{PYTHON_BUILD_PATH}/hadoop/hadoop-{HADOOP_VERSION}/etc/hadoop",
+        )
+        hadoop_shell_path = (
+            f"{PYTHON_BUILD_PATH}/hadoop/hadoop-{HADOOP_VERSION}/bin/hadoop"
+        )
+        # get the classpath
+        try:
+            result = subprocess.run(
+                [hadoop_shell_path, "classpath", "--glob"],
+                capture_output=True,
+                text=True,
+                check=True,
+            )
+            if result.returncode == 0:
+                os.putenv("CLASSPATH", str(result.stdout))
+            else:
+                raise GravitinoRuntimeException(
+                    f"Command failed with return code is not 0, stdout: {result.stdout}, stderr:{result.stderr}"
+                )
+        except subprocess.CalledProcessError as e:
+            raise GravitinoRuntimeException(
+                f"Command failed with return code {e.returncode}, stderr:{e.stderr}"
+            ) from e
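
The `hadoop classpath --glob` call above is the step that makes the Python HDFS client usable: it expands every jar the client needs, and the result is exported as `CLASSPATH` so libhdfs-based bindings can load them. A standalone sketch of the same step, assuming only that `HADOOP_HOME` points at an unpacked Hadoop distribution:

```python
import os
import subprocess

# Assumes HADOOP_HOME points at an unpacked Hadoop distribution.
hadoop = os.path.join(os.environ["HADOOP_HOME"], "bin", "hadoop")
result = subprocess.run(
    [hadoop, "classpath", "--glob"],
    capture_output=True,
    text=True,
    check=True,  # raises CalledProcessError on a non-zero exit code
)
# Exporting CLASSPATH lets JNI/libhdfs-based clients locate the Hadoop jars.
os.environ["CLASSPATH"] = result.stdout.strip()
```
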
diff --git a/clients/client-python/tests/integration/hdfs_container.py b/clients/client-python/tests/integration/hdfs_container.py
new file mode 100644
index 00000000000..34bc41d8641
--- /dev/null
+++ b/clients/client-python/tests/integration/hdfs_container.py
@@ -0,0 +1,158 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+"""
+
+import asyncio
+import logging
+import os
+import time
+
+import docker
+from docker import types as tp
+from docker.errors import NotFound
+
+from gravitino.exceptions.gravitino_runtime_exception import GravitinoRuntimeException
+
+logger = logging.getLogger(__name__)
+
+
+async def check_hdfs_status(hdfs_container):
+    retry_limit = 15
+    for _ in range(retry_limit):
+        try:
+            command_and_args = ["bash", "/tmp/check-status.sh"]
+            exec_result = hdfs_container.exec_run(command_and_args)
+            if exec_result.exit_code != 0:
+                message = (
+                    f"Command {command_and_args} exited with {exec_result.exit_code}"
+                )
+                logger.warning(message)
+                logger.warning("output: %s", exec_result.output)
+                output_status_command = ["hdfs", "dfsadmin", "-report"]
+                exec_result = hdfs_container.exec_run(output_status_command)
+                logger.info("HDFS report, output: %s", exec_result.output)
+            else:
+                logger.info("HDFS startup successfully!")
+                return True
+        except Exception as e:
+            logger.error(
+                "Exception occurred while checking HDFS container status: %s", e
+            )
+        time.sleep(10)
+    return False
+
+
+async def check_hdfs_container_status(hdfs_container):
+    timeout_sec = 150
+    try:
+        result = await asyncio.wait_for(
+            check_hdfs_status(hdfs_container), timeout=timeout_sec
+        )
+        assert result is True, "HDFS container startup failed!"
+    except asyncio.TimeoutError as e:
+        raise GravitinoRuntimeException(
+            "Timeout occurred while waiting for checking HDFS container status."
+        ) from e
+
+
+class HDFSContainer:
+    _docker_client = None
+    _container = None
+    _network = None
+    _ip = ""
+    _network_name = "python-net"
+    _container_name = "python-hdfs"
+
+    def __init__(self):
+        self._docker_client = docker.from_env()
+        self._create_networks()
+        try:
+            container = self._docker_client.containers.get(self._container_name)
+            if container is not None:
+                if container.status != "running":
+                    container.restart()
+                self._container = container
+        except NotFound:
+            logger.warning("Cannot find hdfs container in docker env, skip remove.")
+        if self._container is None:
+            image_name = os.environ.get("GRAVITINO_CI_HIVE_DOCKER_IMAGE")
+            if image_name is None:
+                raise GravitinoRuntimeException(
+                    "GRAVITINO_CI_HIVE_DOCKER_IMAGE env variable is not set."
+                )
+            self._container = self._docker_client.containers.run(
+                image=image_name,
+                name=self._container_name,
+                detach=True,
+                environment={"HADOOP_USER_NAME": "datastrato"},
+                network=self._network_name,
+            )
+        asyncio.run(check_hdfs_container_status(self._container))
+
+        self._fetch_ip()
+
+    def _create_networks(self):
+        pool_config = tp.IPAMPool(subnet="10.20.31.16/28")
+        ipam_config = tp.IPAMConfig(driver="default", pool_configs=[pool_config])
+        networks = self._docker_client.networks.list()
+        for network in networks:
+            if network.name == self._network_name:
+                self._network = network
+                break
+        if self._network is None:
+            self._network = self._docker_client.networks.create(
+                name=self._network_name, driver="bridge", ipam=ipam_config
+            )
+
+    def _fetch_ip(self):
+        if self._container is None:
+            raise GravitinoRuntimeException("The HDFS container has not init.")
+
+        container_info = self._docker_client.api.inspect_container(self._container.id)
+        self._ip = container_info["NetworkSettings"]["Networks"][self._network_name][
+            "IPAddress"
+        ]
+
+    def get_ip(self):
+        return self._ip
+
+    def close(self):
+        try:
+            self._container.kill()
+        except RuntimeError as e:
+            logger.warning(
+                "Exception occurred while killing container %s : %s",
+                self._container_name,
+                e,
+            )
+        try:
+            self._container.remove()
+        except RuntimeError as e:
+            logger.warning(
+                "Exception occurred while removing container %s : %s",
+                self._container_name,
+                e,
+            )
+        try:
+            self._network.remove()
+        except RuntimeError as e:
+            logger.warning(
+                "Exception occurred while removing network %s : %s",
+                self._network_name,
+                e,
+            )
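
For running the new tests locally, a minimal sketch of driving this helper directly, assuming a local Docker daemon is available; the image name mirrors the one the Gradle build exports, and the HDFS port in the comment is an assumption, not something this file defines:

```python
import os

from hdfs_container import HDFSContainer  # module added in this patch

# The constructor reads this env variable when no container exists yet.
os.environ.setdefault(
    "GRAVITINO_CI_HIVE_DOCKER_IMAGE", "datastrato/gravitino-ci-hive:0.1.12"
)

container = HDFSContainer()  # blocks until HDFS reports healthy
try:
    ip = container.get_ip()
    print("HDFS container IP:", ip)
    # A test could now target e.g. hdfs://<ip>:9000 (port assumed).
finally:
    container.close()  # kills the container and removes the network
```
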
diff --git a/clients/client-python/tests/integration/integration_test_env.py b/clients/client-python/tests/integration/integration_test_env.py
index a34bba23dbb..dde2c2410e9 100644
--- a/clients/client-python/tests/integration/integration_test_env.py
+++ b/clients/client-python/tests/integration/integration_test_env.py
@@ -26,6 +26,8 @@
 
 import requests
 
+from gravitino.exceptions.gravitino_runtime_exception import GravitinoRuntimeException
+
 logger = logging.getLogger(__name__)
 
 
@@ -132,3 +134,87 @@ def tearDownClass(cls):
 
         if gravitino_server_running:
             logger.error("Can't stop Gravitino server!")
+
+    @classmethod
+    def restart_server(cls):
+        logger.info("Restarting Gravitino server...")
+        gravitino_home = os.environ.get("GRAVITINO_HOME")
+        if gravitino_home is None:
+            raise GravitinoRuntimeException("Cannot find GRAVITINO_HOME env.")
+        gravitino_startup_script = os.path.join(gravitino_home, "bin/gravitino.sh")
+        if not os.path.exists(gravitino_startup_script):
+            raise GravitinoRuntimeException(
+                f"Can't find Gravitino startup script: {gravitino_startup_script}. "
+                "Please execute `./gradlew compileDistribution -x test` in the Gravitino "
+                "project root directory."
+            )
+
+        # Restart Gravitino Server
+        env_vars = os.environ.copy()
+        env_vars["HADOOP_USER_NAME"] = "datastrato"
+        result = subprocess.run(
+            [gravitino_startup_script, "restart"],
+            env=env_vars,
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+        if result.stdout:
+            logger.info("stdout: %s", result.stdout)
+        if result.stderr:
+            logger.info("stderr: %s", result.stderr)
+
+        if not check_gravitino_server_status():
+            raise GravitinoRuntimeException("ERROR: Can't start Gravitino server!")
+
+    @classmethod
+    def _append_catalog_hadoop_conf(cls, config):
+        logger.info("Append catalog hadoop conf.")
+        gravitino_home = os.environ.get("GRAVITINO_HOME")
+        if gravitino_home is None:
+            raise GravitinoRuntimeException("Cannot find GRAVITINO_HOME env.")
+        hadoop_conf_path = f"{gravitino_home}/catalogs/hadoop/conf/hadoop.conf"
+        if not os.path.exists(hadoop_conf_path):
+            raise GravitinoRuntimeException(
+                f"Hadoop conf file is not found at `{hadoop_conf_path}`."
+            )
+
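+        # Entries are appended in the same "key = value" line format that
+        # _reset_catalog_hadoop_conf parses back, e.g. (illustrative value):
+        #   gravitino.bypass.fs.defaultFS = hdfs://10.20.31.19:9000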
+        with open(hadoop_conf_path, mode="a", encoding="utf-8") as f:
+            for key, value in config.items():
+                f.write(f"\n{key} = {value}")
+
+    @classmethod
+    def _reset_catalog_hadoop_conf(cls, config):
+        logger.info("Reset catalog hadoop conf.")
+        gravitino_home = os.environ.get("GRAVITINO_HOME")
+        if gravitino_home is None:
+            raise GravitinoRuntimeException("Cannot find GRAVITINO_HOME env.")
+        hadoop_conf_path = f"{gravitino_home}/catalogs/hadoop/conf/hadoop.conf"
+        if not os.path.exists(hadoop_conf_path):
+            raise GravitinoRuntimeException(
+                f"Hadoop conf file is not found at `{hadoop_conf_path}`."
+            )
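+        # Rebuild hadoop.conf in place: comment lines and unparsable lines are kept
+        # verbatim, while "key = value" entries survive only if their key was not
+        # one of the keys appended via `config`.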
+        filtered_lines = []
+        with open(hadoop_conf_path, mode="r", encoding="utf-8") as file:
+            origin_lines = file.readlines()
+
+        existed_config = {}
+        for line in origin_lines:
+            line = line.strip()
+            if line.startswith("#"):
+                # keep comment lines as-is
+                filtered_lines.append(line + "\n")
+            else:
+                try:
+                    key, value = line.split("=", 1)
+                    existed_config[key.strip()] = value.strip()
+                except ValueError:
+                    # not a "key = value" pair, keep the line as-is
+                    filtered_lines.append(line + "\n")
+
+        for key, value in existed_config.items():
+            if config.get(key) is None:
+                append_line = f"{key} = {value}\n"
+                filtered_lines.append(append_line)
+
+        with open(hadoop_conf_path, mode="w", encoding="utf-8") as file:
+            for line in filtered_lines:
+                file.write(line)
diff --git a/clients/client-python/tests/integration/test_gvfs_with_hdfs.py b/clients/client-python/tests/integration/test_gvfs_with_hdfs.py
new file mode 100644
index 00000000000..442bbc87edd
--- /dev/null
+++ b/clients/client-python/tests/integration/test_gvfs_with_hdfs.py
@@ -0,0 +1,704 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+"""
+
+import logging
+import os
+import platform
+import unittest
+from random import randint
+from typing import Dict
+
+import pandas
+import pyarrow as pa
+import pyarrow.dataset as dt
+import pyarrow.parquet as pq
+from fsspec.implementations.local import LocalFileSystem
+from fsspec.implementations.arrow import ArrowFSWrapper
+from llama_index.core import SimpleDirectoryReader
+from pyarrow.fs import HadoopFileSystem
+from gravitino import (
+    gvfs,
+    NameIdentifier,
+    GravitinoAdminClient,
+    GravitinoClient,
+    Catalog,
+    Fileset,
+)
+from gravitino.exceptions.gravitino_runtime_exception import GravitinoRuntimeException
+from tests.integration.integration_test_env import IntegrationTestEnv
+from tests.integration.hdfs_container import HDFSContainer
+from tests.integration.base_hadoop_env import BaseHadoopEnvironment
+
+logger = logging.getLogger(__name__)
+
+DOCKER_TEST = os.environ.get("DOCKER_TEST")
+
+
+#  The Hadoop distribution package does not have native hdfs libraries for macOS / Windows systems
+#  (`libhdfs.dylib` for macOS and `libhdfs.dll` for Windows), so the integration tests cannot be run
+#  on these two systems at present.
+@unittest.skipIf(
+    DOCKER_TEST == "false" or platform.system() != "Linux",
+    "Skipping tests on non-Linux systems or when DOCKER_TEST=false",
+)
+class TestGvfsWithHDFS(IntegrationTestEnv):
+    hdfs_container: HDFSContainer = None
+    config: Dict = None
+    metalake_name: str = "TestGvfsWithHDFS_metalake" + str(randint(1, 10000))
+    catalog_name: str = "test_gvfs_catalog" + str(randint(1, 10000))
+    catalog_provider: str = "hadoop"
+
+    schema_name: str = "test_gvfs_schema"
+
+    fileset_name: str = "test_gvfs_fileset"
+    fileset_comment: str = "fileset_comment"
+    fileset_storage_location: str = ""
+    fileset_properties_key1: str = "fileset_properties_key1"
+    fileset_properties_value1: str = "fileset_properties_value1"
+    fileset_properties_key2: str = "fileset_properties_key2"
+    fileset_properties_value2: str = "fileset_properties_value2"
+    fileset_properties: Dict[str, str] = {
+        fileset_properties_key1: fileset_properties_value1,
+        fileset_properties_key2: fileset_properties_value2,
+    }
+
+    schema_ident: NameIdentifier = NameIdentifier.of(
+        metalake_name, catalog_name, schema_name
+    )
+    fileset_ident: NameIdentifier = NameIdentifier.of(schema_name, fileset_name)
+
+    gravitino_admin_client: GravitinoAdminClient = GravitinoAdminClient(
+        uri="http://localhost:8090"
+    )
+    gravitino_client: GravitinoClient = None
+
+    @classmethod
+    def setUpClass(cls):
+        cls.hdfs_container = HDFSContainer()
+        hdfs_container_ip = cls.hdfs_container.get_ip()
+        # init hadoop env
+        BaseHadoopEnvironment.init_hadoop_env()
+        cls.config = {
+            "gravitino.bypass.fs.defaultFS": f"hdfs://{hdfs_container_ip}:9000"
+        }
+        # append the hadoop conf to server
+        cls._append_catalog_hadoop_conf(cls.config)
+        # restart the server
+        cls.restart_server()
+        # create entity
+        cls._init_test_entities()
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            cls._clean_test_data()
+            # reset server conf
+            cls._reset_catalog_hadoop_conf(cls.config)
+            # restart server
+            cls.restart_server()
+            # clear hadoop env
+            BaseHadoopEnvironment.clear_hadoop_env()
+        finally:
+            # close hdfs container
+            cls.hdfs_container.close()
+
+    @classmethod
+    def _init_test_entities(cls):
+        cls.gravitino_admin_client.create_metalake(
+            name=cls.metalake_name, comment="", properties={}
+        )
+        cls.gravitino_client = GravitinoClient(
+            uri="http://localhost:8090", metalake_name=cls.metalake_name
+        )
+        catalog = cls.gravitino_client.create_catalog(
+            name=cls.catalog_name,
+            catalog_type=Catalog.Type.FILESET,
+            provider=cls.catalog_provider,
+            comment="",
+            properties={},
+        )
+        catalog.as_schemas().create_schema(
+            schema_name=cls.schema_name, comment="", properties={}
+        )
+
+        cls.fileset_storage_location: str = (
+            f"hdfs://{cls.hdfs_container.get_ip()}:9000/{cls.catalog_name}/{cls.schema_name}/{cls.fileset_name}"
+        )
+        cls.fileset_gvfs_location = (
+            f"gvfs://fileset/{cls.catalog_name}/{cls.schema_name}/{cls.fileset_name}"
+        )
+        catalog.as_fileset_catalog().create_fileset(
+            ident=cls.fileset_ident,
+            fileset_type=Fileset.Type.MANAGED,
+            comment=cls.fileset_comment,
+            storage_location=cls.fileset_storage_location,
+            properties=cls.fileset_properties,
+        )
+        arrow_hadoop_fs = HadoopFileSystem(host=cls.hdfs_container.get_ip(), port=9000)
+        cls.hdfs = ArrowFSWrapper(arrow_hadoop_fs)
+        cls.conf: Dict = {"fs.defaultFS": f"hdfs://{cls.hdfs_container.get_ip()}:9000/"}
+
+    @classmethod
+    def _clean_test_data(cls):
+        try:
+            cls.gravitino_client = GravitinoClient(
+                uri="http://localhost:8090", metalake_name=cls.metalake_name
+            )
+            catalog = cls.gravitino_client.load_catalog(name=cls.catalog_name)
+            logger.info(
+                "Drop fileset %s[%s]",
+                cls.fileset_ident,
+                catalog.as_fileset_catalog().drop_fileset(ident=cls.fileset_ident),
+            )
+            logger.info(
+                "Drop schema %s[%s]",
+                cls.schema_ident,
+                catalog.as_schemas().drop_schema(
+                    schema_name=cls.schema_name, cascade=True
+                ),
+            )
+            logger.info(
+                "Drop catalog %s[%s]",
+                cls.catalog_name,
+                cls.gravitino_client.drop_catalog(name=cls.catalog_name),
+            )
+            logger.info(
+                "Drop metalake %s[%s]",
+                cls.metalake_name,
+                cls.gravitino_admin_client.drop_metalake(cls.metalake_name),
+            )
+        except Exception as e:
+            logger.error("Clean test data failed: %s", e)
+
+    def test_ls(self):
+        ls_dir = self.fileset_gvfs_location + "/test_ls"
+        ls_actual_dir = self.fileset_storage_location + "/test_ls"
+
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(ls_actual_dir)
+        self.assertTrue(self.hdfs.exists(ls_actual_dir))
+
+        ls_file = self.fileset_gvfs_location + "/test_ls/test.file"
+        ls_actual_file = self.fileset_storage_location + "/test_ls/test.file"
+        self.hdfs.touch(ls_actual_file)
+        self.assertTrue(self.hdfs.exists(ls_actual_file))
+
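+        # gvfs strips the "gvfs://" scheme from returned paths, hence the slicing below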
+        # test detail = false
+        file_list_without_detail = fs.ls(ls_dir, detail=False)
+        self.assertEqual(1, len(file_list_without_detail))
+        self.assertEqual(file_list_without_detail[0], ls_file[len("gvfs://") :])
+
+        # test detail = true
+        file_list_with_detail = fs.ls(ls_dir, detail=True)
+        self.assertEqual(1, len(file_list_with_detail))
+        self.assertEqual(file_list_with_detail[0]["name"], ls_file[len("gvfs://") :])
+
+    def test_info(self):
+        info_dir = self.fileset_gvfs_location + "/test_info"
+        info_actual_dir = self.fileset_storage_location + "/test_info"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(info_actual_dir)
+        self.assertTrue(self.hdfs.exists(info_actual_dir))
+
+        info_file = self.fileset_gvfs_location + "/test_info/test.file"
+        info_actual_file = self.fileset_storage_location + "/test_info/test.file"
+        self.hdfs.touch(info_actual_file)
+        self.assertTrue(self.hdfs.exists(info_actual_file))
+
+        dir_info = fs.info(info_dir)
+        self.assertEqual(dir_info["name"], info_dir[len("gvfs://") :])
+
+        file_info = fs.info(info_file)
+        self.assertEqual(file_info["name"], info_file[len("gvfs://") :])
+
+    def test_exist(self):
+        exist_dir = self.fileset_gvfs_location + "/test_exist"
+        exist_actual_dir = self.fileset_storage_location + "/test_exist"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(exist_actual_dir)
+        self.assertTrue(self.hdfs.exists(exist_actual_dir))
+        self.assertTrue(fs.exists(exist_dir))
+
+        exist_file = self.fileset_gvfs_location + "/test_exist/test.file"
+        exist_actual_file = self.fileset_storage_location + "/test_exist/test.file"
+        self.hdfs.touch(exist_actual_file)
+        self.assertTrue(self.hdfs.exists(exist_actual_file))
+        self.assertTrue(fs.exists(exist_file))
+
+    def test_cp_file(self):
+        cp_file_dir = self.fileset_gvfs_location + "/test_cp_file"
+        cp_file_actual_dir = self.fileset_storage_location + "/test_cp_file"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(cp_file_actual_dir)
+        self.assertTrue(self.hdfs.exists(cp_file_actual_dir))
+        self.assertTrue(fs.exists(cp_file_dir))
+
+        cp_file_file = self.fileset_gvfs_location + "/test_cp_file/test.file"
+        cp_file_actual_file = self.fileset_storage_location + "/test_cp_file/test.file"
+        self.hdfs.touch(cp_file_actual_file)
+        self.assertTrue(self.hdfs.exists(cp_file_actual_file))
+        self.assertTrue(fs.exists(cp_file_file))
+
+        with self.hdfs.open(cp_file_actual_file, "wb") as f:
+            f.write(b"test_file_1")
+
+        cp_file_new_file = self.fileset_gvfs_location + "/test_cp_file/test_cp.file"
+        cp_file_new_actual_file = (
+            self.fileset_storage_location + "/test_cp_file/test_cp.file"
+        )
+        fs.cp_file(cp_file_file, cp_file_new_file)
+        self.assertTrue(fs.exists(cp_file_new_file))
+
+        with self.hdfs.open(cp_file_new_actual_file, "rb") as f:
+            result = f.read()
+        self.assertEqual(b"test_file_1", result)
+
+    def test_mv(self):
+        mv_dir = self.fileset_gvfs_location + "/test_mv"
+        mv_actual_dir = self.fileset_storage_location + "/test_mv"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(mv_actual_dir)
+        self.assertTrue(self.hdfs.exists(mv_actual_dir))
+        self.assertTrue(fs.exists(mv_dir))
+
+        mv_new_dir = self.fileset_gvfs_location + "/test_mv_new"
+        mv_new_actual_dir = self.fileset_storage_location + "/test_mv_new"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(mv_new_actual_dir)
+        self.assertTrue(self.hdfs.exists(mv_new_actual_dir))
+        self.assertTrue(fs.exists(mv_new_dir))
+
+        mv_file = self.fileset_gvfs_location + "/test_mv/test.file"
+        mv_actual_file = self.fileset_storage_location + "/test_mv/test.file"
+        self.hdfs.touch(mv_actual_file)
+        self.assertTrue(self.hdfs.exists(mv_actual_file))
+        self.assertTrue(fs.exists(mv_file))
+
+        mv_new_file = self.fileset_gvfs_location + "/test_mv_new/test_new.file"
+        mv_new_actual_file = (
+            self.fileset_storage_location + "/test_mv_new/test_new.file"
+        )
+
+        fs.mv(mv_file, mv_new_file)
+        self.assertTrue(fs.exists(mv_new_file))
+        self.assertTrue(self.hdfs.exists(mv_new_actual_file))
+
+    def test_rm(self):
+        rm_dir = self.fileset_gvfs_location + "/test_rm"
+        rm_actual_dir = self.fileset_storage_location + "/test_rm"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(rm_actual_dir)
+        self.assertTrue(self.hdfs.exists(rm_actual_dir))
+        self.assertTrue(fs.exists(rm_dir))
+
+        rm_file = self.fileset_gvfs_location + "/test_rm/test.file"
+        rm_actual_file = self.fileset_storage_location + "/test_rm/test.file"
+        self.hdfs.touch(rm_actual_file)
+        self.assertTrue(self.hdfs.exists(rm_actual_file))
+        self.assertTrue(fs.exists(rm_file))
+
+        # test delete file
+        fs.rm(rm_file)
+        self.assertFalse(fs.exists(rm_file))
+
+        # test delete dir with recursive = false
+        rm_new_file = self.fileset_gvfs_location + "/test_rm/test_new.file"
+        rm_new_actual_file = self.fileset_storage_location + "/test_rm/test_new.file"
+        self.hdfs.touch(rm_new_actual_file)
+        self.assertTrue(self.hdfs.exists(rm_new_actual_file))
+        self.assertTrue(fs.exists(rm_new_file))
+        with self.assertRaises(ValueError):
+            fs.rm(rm_dir, recursive=False)
+
+        # test delete dir with recursive = true
+        fs.rm(rm_dir, recursive=True)
+        self.assertFalse(fs.exists(rm_dir))
+
+    def test_rm_file(self):
+        rm_file_dir = self.fileset_gvfs_location + "/test_rm_file"
+        rm_file_actual_dir = self.fileset_storage_location + "/test_rm_file"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(rm_file_actual_dir)
+        self.assertTrue(self.hdfs.exists(rm_file_actual_dir))
+        self.assertTrue(fs.exists(rm_file_dir))
+
+        rm_file_file = self.fileset_gvfs_location + "/test_rm_file/test.file"
+        rm_file_actual_file = self.fileset_storage_location + "/test_rm_file/test.file"
+        self.hdfs.touch(rm_file_actual_file)
+        self.assertTrue(self.hdfs.exists(rm_file_actual_file))
+        self.assertTrue(fs.exists(rm_file_file))
+
+        # test delete file
+        fs.rm_file(rm_file_file)
+        self.assertFalse(fs.exists(rm_file_file))
+
+        # test delete dir
+        with self.assertRaises(OSError):
+            fs.rm_file(rm_file_dir)
+
+    def test_rmdir(self):
+        rmdir_dir = self.fileset_gvfs_location + "/test_rmdir"
+        rmdir_actual_dir = self.fileset_storage_location + "/test_rmdir"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(rmdir_actual_dir)
+        self.assertTrue(self.hdfs.exists(rmdir_actual_dir))
+        self.assertTrue(fs.exists(rmdir_dir))
+
+        rmdir_file = self.fileset_gvfs_location + "/test_rmdir/test.file"
+        rmdir_actual_file = self.fileset_storage_location + "/test_rmdir/test.file"
+        self.hdfs.touch(rmdir_actual_file)
+        self.assertTrue(self.hdfs.exists(rmdir_actual_file))
+        self.assertTrue(fs.exists(rmdir_file))
+
+        # test delete file
+        with self.assertRaises(OSError):
+            fs.rmdir(rmdir_file)
+
+        # test delete dir
+        fs.rmdir(rmdir_dir)
+        self.assertFalse(fs.exists(rmdir_dir))
+
+    def test_open(self):
+        open_dir = self.fileset_gvfs_location + "/test_open"
+        open_actual_dir = self.fileset_storage_location + "/test_open"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(open_actual_dir)
+        self.assertTrue(self.hdfs.exists(open_actual_dir))
+        self.assertTrue(fs.exists(open_dir))
+
+        open_file = self.fileset_gvfs_location + "/test_open/test.file"
+        open_actual_file = self.fileset_storage_location + "/test_open/test.file"
+        self.hdfs.touch(open_actual_file)
+        self.assertTrue(self.hdfs.exists(open_actual_file))
+        self.assertTrue(fs.exists(open_file))
+
+        # test open and write file
+        with fs.open(open_file, mode="wb") as f:
+            f.write(b"test_open_write")
+        self.assertTrue(fs.info(open_file)["size"] > 0)
+
+        # test open and read file
+        with fs.open(open_file, mode="rb") as f:
+            self.assertEqual(b"test_open_write", f.read())
+
+    def test_mkdir(self):
+        mkdir_dir = self.fileset_gvfs_location + "/test_mkdir"
+        mkdir_actual_dir = self.fileset_storage_location + "/test_mkdir"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        fs.mkdir(mkdir_dir)
+        self.assertTrue(fs.exists(mkdir_dir))
+        self.assertTrue(self.hdfs.exists(mkdir_actual_dir))
+
+        # test mkdir dir with create_parents = false
+        parent_not_exist_virtual_path = mkdir_dir + "/not_exist/sub_dir"
+        self.assertFalse(fs.exists(parent_not_exist_virtual_path))
+
+        with self.assertRaises(OSError):
+            fs.mkdir(parent_not_exist_virtual_path, create_parents=False)
+
+        # test mkdir dir with create_parents = true
+        parent_not_exist_virtual_path2 = mkdir_dir + "/not_exist/sub_dir"
+        self.assertFalse(fs.exists(parent_not_exist_virtual_path2))
+
+        fs.mkdir(parent_not_exist_virtual_path2, create_parents=True)
+        self.assertTrue(fs.exists(parent_not_exist_virtual_path2))
+
+    def test_makedirs(self):
+        makedirs_dir = self.fileset_gvfs_location + "/test_makedirs"
+        makedirs_actual_dir = self.fileset_storage_location + "/test_makedirs"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        fs.makedirs(makedirs_dir)
+        self.assertTrue(fs.exists(makedirs_dir))
+        self.assertTrue(self.hdfs.exists(makedirs_actual_dir))
+
+        # test makedirs when the parent dir does not exist
+        parent_not_exist_virtual_path = makedirs_dir + "/not_exist/sub_dir"
+        self.assertFalse(fs.exists(parent_not_exist_virtual_path))
+        fs.makedirs(parent_not_exist_virtual_path)
+        self.assertTrue(fs.exists(parent_not_exist_virtual_path))
+
+    def test_created(self):
+        created_dir = self.fileset_gvfs_location + "/test_created"
+        created_actual_dir = self.fileset_storage_location + "/test_created"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(created_actual_dir)
+        self.assertTrue(self.hdfs.exists(created_actual_dir))
+        self.assertTrue(fs.exists(created_dir))
+
+        with self.assertRaises(GravitinoRuntimeException):
+            fs.created(created_dir)
+
+    def test_modified(self):
+        modified_dir = self.fileset_gvfs_location + "/test_modified"
+        modified_actual_dir = self.fileset_storage_location + "/test_modified"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(modified_actual_dir)
+        self.assertTrue(self.hdfs.exists(modified_actual_dir))
+        self.assertTrue(fs.exists(modified_dir))
+
+        # the modified time of an existing dir should be available
+        self.assertIsNotNone(fs.modified(modified_dir))
+
+    def test_cat_file(self):
+        cat_dir = self.fileset_gvfs_location + "/test_cat"
+        cat_actual_dir = self.fileset_storage_location + "/test_cat"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(cat_actual_dir)
+        self.assertTrue(self.hdfs.exists(cat_actual_dir))
+        self.assertTrue(fs.exists(cat_dir))
+
+        cat_file = self.fileset_gvfs_location + "/test_cat/test.file"
+        cat_actual_file = self.fileset_storage_location + "/test_cat/test.file"
+        self.hdfs.touch(cat_actual_file)
+        self.assertTrue(self.hdfs.exists(cat_actual_file))
+        self.assertTrue(fs.exists(cat_file))
+
+        # test open and write file
+        with fs.open(cat_file, mode="wb") as f:
+            f.write(b"test_cat_file")
+        self.assertTrue(fs.info(cat_file)["size"] > 0)
+
+        # test cat file
+        content = fs.cat_file(cat_file)
+        self.assertEqual(b"test_cat_file", content)
+
+    def test_get_file(self):
+        get_dir = self.fileset_gvfs_location + "/test_get"
+        get_actual_dir = self.fileset_storage_location + "/test_get"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(get_actual_dir)
+        self.assertTrue(self.hdfs.exists(get_actual_dir))
+        self.assertTrue(fs.exists(get_dir))
+
+        get_file = self.fileset_gvfs_location + "/test_get/test.file"
+        get_actual_file = self.fileset_storage_location + "/test_get/test.file"
+        self.hdfs.touch(get_actual_file)
+        self.assertTrue(self.hdfs.exists(get_actual_file))
+        self.assertTrue(fs.exists(get_file))
+
+        # test open and write file
+        with fs.open(get_file, mode="wb") as f:
+            f.write(b"test_get_file")
+        self.assertTrue(fs.info(get_file)["size"] > 0)
+
+        # test get file
+        local_fs = LocalFileSystem()
+        local_dir = "/tmp/test_gvfs_local_file_" + str(randint(1, 10000))
+        local_fs.makedirs(local_dir)
+        local_path = local_dir + "/get_file.txt"
+        local_fs.touch(local_path)
+        self.assertTrue(local_fs.exists(local_path))
+        fs.get_file(get_file, local_path)
+        self.assertEqual(b"test_get_file", local_fs.cat_file(local_path))
+        local_fs.rm(local_dir, recursive=True)
+
+        # test get a file to a remote file
+        remote_path = self.fileset_gvfs_location + "/test_file_2.par"
+        with self.assertRaises(GravitinoRuntimeException):
+            fs.get_file(get_file, remote_path)
+
+    def test_pandas(self):
+        pandas_dir = self.fileset_gvfs_location + "/test_pandas"
+        pandas_actual_dir = self.fileset_storage_location + "/test_pandas"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(pandas_actual_dir)
+        self.assertTrue(self.hdfs.exists(pandas_actual_dir))
+        self.assertTrue(fs.exists(pandas_dir))
+
+        data = pandas.DataFrame({"Name": ["A", "B", "C", "D"], "ID": [20, 21, 19, 18]})
+        # to parquet
+        parquet_file = self.fileset_gvfs_location + "/test_pandas/test.parquet"
+        parquet_actual_file = (
+            self.fileset_storage_location + "/test_pandas/test.parquet"
+        )
+        data.to_parquet(parquet_file, filesystem=fs)
+        self.assertTrue(fs.exists(parquet_file))
+        self.assertTrue(self.hdfs.exists(parquet_actual_file))
+
+        # read parquet
+        ds1 = pandas.read_parquet(path=parquet_file, filesystem=fs)
+        self.assertTrue(data.equals(ds1))
+        storage_options = {
+            "server_uri": "http://localhost:8090",
+            "metalake_name": self.metalake_name,
+        }
+        # to csv
+        csv_file = self.fileset_gvfs_location + "/test_pandas/test.csv"
+        csv_actual_file = self.fileset_storage_location + "/test_pandas/test.csv"
+        data.to_csv(
+            csv_file,
+            index=False,
+            storage_options=storage_options,
+        )
+        self.assertTrue(fs.exists(csv_file))
+        self.assertTrue(self.hdfs.exists(csv_actual_file))
+
+        # read csv
+        ds2 = pandas.read_csv(csv_file, storage_options=storage_options)
+        self.assertTrue(data.equals(ds2))
+
+    def test_pyarrow(self):
+        pyarrow_dir = self.fileset_gvfs_location + "/test_pyarrow"
+        pyarrow_actual_dir = self.fileset_storage_location + "/test_pyarrow"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(pyarrow_actual_dir)
+        self.assertTrue(self.hdfs.exists(pyarrow_actual_dir))
+        self.assertTrue(fs.exists(pyarrow_dir))
+
+        data = pandas.DataFrame({"Name": ["A", "B", "C", "D"], "ID": [20, 21, 19, 18]})
+        # to parquet
+        parquet_file = pyarrow_dir + "/test.parquet"
+        data.to_parquet(parquet_file, filesystem=fs)
+        self.assertTrue(fs.exists(parquet_file))
+
+        # read as arrow dataset
+        arrow_dataset = dt.dataset(parquet_file, filesystem=fs)
+        arrow_tb_1 = arrow_dataset.to_table()
+
+        arrow_tb_2 = pa.Table.from_pandas(data)
+        self.assertTrue(arrow_tb_1.equals(arrow_tb_2))
+
+        # read as arrow parquet dataset
+        arrow_tb_3 = pq.read_table(parquet_file, filesystem=fs)
+        self.assertTrue(arrow_tb_3.equals(arrow_tb_2))
+
+    def test_llama_index(self):
+        llama_dir = self.fileset_gvfs_location + "/test_llama"
+        llama_actual_dir = self.fileset_storage_location + "/test_llama"
+        fs = gvfs.GravitinoVirtualFileSystem(
+            server_uri="http://localhost:8090",
+            metalake_name=self.metalake_name,
+            **self.conf,
+        )
+        self.hdfs.mkdir(llama_actual_dir)
+        self.assertTrue(self.hdfs.exists(llama_actual_dir))
+        self.assertTrue(fs.exists(llama_dir))
+        data = pandas.DataFrame({"Name": ["A", "B", "C", "D"], "ID": [20, 21, 19, 18]})
+
+        storage_options = {
+            "server_uri": "http://localhost:8090",
+            "metalake_name": self.metalake_name,
+        }
+        csv_file = llama_dir + "/test.csv"
+        # to csv
+        data.to_csv(
+            csv_file,
+            index=False,
+            storage_options=storage_options,
+        )
+        self.assertTrue(fs.exists(csv_file))
+        another_csv_file = llama_dir + "/sub_dir/test1.csv"
+        data.to_csv(
+            another_csv_file,
+            index=False,
+            storage_options=storage_options,
+        )
+        self.assertTrue(fs.exists(another_csv_file))
+
+        reader = SimpleDirectoryReader(
+            input_dir=llama_dir[len("gvfs://") :],
+            fs=fs,
+            recursive=True,  # recursively searches all subdirectories
+        )
+        documents = reader.load_data()
+        self.assertEqual(len(documents), 2)
+        doc_1 = documents[0]
+        result_1 = [line.strip().split(", ") for line in doc_1.text.split("\n")]
+        self.assertEqual(4, len(result_1))
+        for row in result_1:
+            if row[0] == "A":
+                self.assertEqual(row[1], "20")
+            elif row[0] == "B":
+                self.assertEqual(row[1], "21")
+            elif row[0] == "C":
+                self.assertEqual(row[1], "19")
+            elif row[0] == "D":
+                self.assertEqual(row[1], "18")
diff --git a/clients/client-python/tests/integration/test_simple_auth_client.py b/clients/client-python/tests/integration/test_simple_auth_client.py
index 516db2add06..a4ed77fe185 100644
--- a/clients/client-python/tests/integration/test_simple_auth_client.py
+++ b/clients/client-python/tests/integration/test_simple_auth_client.py
@@ -19,7 +19,6 @@
 
 import logging
 import os
-import unittest
 from random import randint
 from typing import Dict
 
@@ -31,11 +30,12 @@
     Fileset,
 )
 from gravitino.auth.simple_auth_provider import SimpleAuthProvider
+from tests.integration.integration_test_env import IntegrationTestEnv
 
 logger = logging.getLogger(__name__)
 
 
-class TestSimpleAuthClient(unittest.TestCase):
+class TestSimpleAuthClient(IntegrationTestEnv):
     creator: str = "test_client"
     metalake_name: str = "TestClient_metalake" + str(randint(1, 10000))
     catalog_name: str = "fileset_catalog"

From 6a3e0f36bf1ef0f5344a0684a22538ae4af79fe5 Mon Sep 17 00:00:00 2001
From: Peidian li <38486782+coolderli@users.noreply.github.com>
Date: Fri, 5 Jul 2024 11:43:54 +0800
Subject: [PATCH 11/12] [#3371] feat(flink-connector): support basic table
 operation (#3795)

### What changes were proposed in this pull request?

- Support basic table operations in the Flink connector
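
A sketch of the DDL/DML this enables through a Gravitino-backed Flink catalog
(the table name and properties below are illustrative, mirroring the added ITs):

```sql
CREATE TABLE test_table (
  string_type STRING COMMENT 'string_type',
  double_type DOUBLE COMMENT 'double_type'
) COMMENT 'test comment' WITH ('test key' = 'test value');
INSERT INTO test_table VALUES ('A', 1.0), ('B', 2.0);
SELECT * FROM test_table;
```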

### Why are the changes needed?

- Fix: #3371

### Does this PR introduce _any_ user-facing change?

- no

### How was this patch tested?
- Add UTs and ITs
---
 flink-connector/build.gradle.kts              |   1 +
 .../flink/connector/PropertiesConverter.java  |  20 ++
 .../flink/connector/catalog/BaseCatalog.java  | 135 ++++++++++++--
 .../connector/hive/GravitinoHiveCatalog.java  |  29 +++
 .../hive/HivePropertiesConverter.java         |  23 +++
 .../flink/connector/utils/TypeUtils.java      |  58 ++++++
 .../hive/TestHivePropertiesConverter.java     |   1 +
 .../integration/test/FlinkCommonIT.java       | 171 +++++++++++++++++-
 .../integration/test/FlinkEnvIT.java          |  28 ++-
 .../test/hive/FlinkHiveCatalogIT.java         |  38 ++--
 .../integration/test/utils/TestUtils.java     |  32 ++++
 .../flink/connector/utils/TestTypeUtils.java  |  56 ++++++
 12 files changed, 563 insertions(+), 29 deletions(-)
 create mode 100644 flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/utils/TypeUtils.java
 create mode 100644 flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/utils/TestTypeUtils.java

diff --git a/flink-connector/build.gradle.kts b/flink-connector/build.gradle.kts
index f34f1ff107f..ff28a72c4a1 100644
--- a/flink-connector/build.gradle.kts
+++ b/flink-connector/build.gradle.kts
@@ -131,6 +131,7 @@ dependencies {
     exclude("com.google.code.findbugs", "jsr305")
   }
   testImplementation("org.apache.flink:flink-table-planner_$scalaVersion:$flinkVersion")
+  testImplementation("org.apache.flink:flink-test-utils:$flinkVersion")
 
   testRuntimeOnly(libs.junit.jupiter.engine)
 }
diff --git a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/PropertiesConverter.java b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/PropertiesConverter.java
index 6b03a18b6d2..981044958ec 100644
--- a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/PropertiesConverter.java
+++ b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/PropertiesConverter.java
@@ -70,4 +70,24 @@ default Map<String, String> toGravitinoSchemaProperties(Map<String, String> flin
   default Map<String, String> toFlinkDatabaseProperties(Map<String, String> gravitinoProperties) {
     return gravitinoProperties;
   }
+
+  /**
+   * Converts properties from Gravitino table properties to Flink connector table properties.
+   *
+   * @param gravitinoProperties The table properties provided by Gravitino.
+   * @return The table properties for the Flink connector.
+   */
+  default Map<String, String> toFlinkTableProperties(Map<String, String> gravitinoProperties) {
+    return gravitinoProperties;
+  }
+
+  /**
+   * Converts properties from Flink connector table properties to Gravitino table properties.
+   *
+   * @param flinkProperties The table properties provided by Flink.
+   * @return The table properties for Gravitino.
+   */
+  default Map<String, String> toGravitinoTableProperties(Map<String, String> flinkProperties) {
+    return flinkProperties;
+  }
 }
diff --git a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/catalog/BaseCatalog.java b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/catalog/BaseCatalog.java
index f98670b4cfe..ae6127c1c51 100644
--- a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/catalog/BaseCatalog.java
+++ b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/catalog/BaseCatalog.java
@@ -20,19 +20,30 @@
 package com.datastrato.gravitino.flink.connector.catalog;
 
 import com.datastrato.gravitino.Catalog;
+import com.datastrato.gravitino.NameIdentifier;
+import com.datastrato.gravitino.Namespace;
 import com.datastrato.gravitino.Schema;
 import com.datastrato.gravitino.SchemaChange;
 import com.datastrato.gravitino.exceptions.NoSuchCatalogException;
 import com.datastrato.gravitino.exceptions.NoSuchSchemaException;
+import com.datastrato.gravitino.exceptions.NoSuchTableException;
 import com.datastrato.gravitino.exceptions.NonEmptySchemaException;
 import com.datastrato.gravitino.exceptions.SchemaAlreadyExistsException;
+import com.datastrato.gravitino.exceptions.TableAlreadyExistsException;
 import com.datastrato.gravitino.flink.connector.PropertiesConverter;
+import com.datastrato.gravitino.flink.connector.utils.TypeUtils;
+import com.datastrato.gravitino.rel.Column;
+import com.datastrato.gravitino.rel.Table;
+import com.datastrato.gravitino.rel.TableChange;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.MapDifference;
 import com.google.common.collect.Maps;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.apache.commons.compress.utils.Lists;
 import org.apache.flink.table.catalog.AbstractCatalog;
 import org.apache.flink.table.catalog.CatalogBaseTable;
@@ -41,7 +52,9 @@
 import org.apache.flink.table.catalog.CatalogFunction;
 import org.apache.flink.table.catalog.CatalogPartition;
 import org.apache.flink.table.catalog.CatalogPartitionSpec;
+import org.apache.flink.table.catalog.CatalogTable;
 import org.apache.flink.table.catalog.ObjectPath;
+import org.apache.flink.table.catalog.ResolvedCatalogBaseTable;
 import org.apache.flink.table.catalog.exceptions.CatalogException;
 import org.apache.flink.table.catalog.exceptions.DatabaseAlreadyExistException;
 import org.apache.flink.table.catalog.exceptions.DatabaseNotEmptyException;
@@ -58,6 +71,7 @@
 import org.apache.flink.table.catalog.stats.CatalogColumnStatistics;
 import org.apache.flink.table.catalog.stats.CatalogTableStatistics;
 import org.apache.flink.table.expressions.Expression;
+import org.apache.flink.table.types.DataType;
 
 /**
  * The BaseCatalog that provides a default implementation for all methods in the {@link
@@ -149,8 +163,17 @@ public void alterDatabase(
   }
 
   @Override
-  public List<String> listTables(String s) throws DatabaseNotExistException, CatalogException {
-    throw new UnsupportedOperationException();
+  public List<String> listTables(String databaseName)
+      throws DatabaseNotExistException, CatalogException {
+    try {
+      return Stream.of(catalog().asTableCatalog().listTables(Namespace.of(databaseName)))
+          .map(NameIdentifier::name)
+          .collect(Collectors.toList());
+    } catch (NoSuchSchemaException e) {
+      throw new DatabaseNotExistException(catalogName(), databaseName, e);
+    } catch (Exception e) {
+      throw new CatalogException(e);
+    }
   }
 
   @Override
@@ -159,32 +182,95 @@ public List<String> listViews(String s) throws DatabaseNotExistException, Catalo
   }
 
   @Override
-  public CatalogBaseTable getTable(ObjectPath objectPath)
+  public CatalogBaseTable getTable(ObjectPath tablePath)
       throws TableNotExistException, CatalogException {
-    throw new UnsupportedOperationException();
+    try {
+      Table table =
+          catalog()
+              .asTableCatalog()
+              .loadTable(NameIdentifier.of(tablePath.getDatabaseName(), tablePath.getObjectName()));
+      return toFlinkTable(table);
+    } catch (NoSuchTableException e) {
+      throw new TableNotExistException(catalogName(), tablePath, e);
+    } catch (Exception e) {
+      throw new CatalogException(e);
+    }
   }
 
   @Override
-  public boolean tableExists(ObjectPath objectPath) throws CatalogException {
-    throw new UnsupportedOperationException();
+  public boolean tableExists(ObjectPath tablePath) throws CatalogException {
+    try {
+      return catalog()
+          .asTableCatalog()
+          .tableExists(NameIdentifier.of(tablePath.getDatabaseName(), tablePath.getObjectName()));
+    } catch (Exception e) {
+      throw new CatalogException(e);
+    }
   }
 
   @Override
-  public void dropTable(ObjectPath objectPath, boolean b)
+  public void dropTable(ObjectPath tablePath, boolean ignoreIfNotExists)
       throws TableNotExistException, CatalogException {
-    throw new UnsupportedOperationException();
+    boolean dropped =
+        catalog()
+            .asTableCatalog()
+            .dropTable(NameIdentifier.of(tablePath.getDatabaseName(), tablePath.getObjectName()));
+    if (!dropped && !ignoreIfNotExists) {
+      throw new TableNotExistException(catalogName(), tablePath);
+    }
   }
 
   @Override
-  public void renameTable(ObjectPath objectPath, String s, boolean b)
+  public void renameTable(ObjectPath tablePath, String newTableName, boolean ignoreIfNotExists)
       throws TableNotExistException, TableAlreadyExistException, CatalogException {
-    throw new UnsupportedOperationException();
+    NameIdentifier identifier =
+        NameIdentifier.of(Namespace.of(tablePath.getDatabaseName()), newTableName);
+
+    if (catalog().asTableCatalog().tableExists(identifier)) {
+      throw new TableAlreadyExistException(
+          catalogName(), ObjectPath.fromString(tablePath.getDatabaseName() + "." + newTableName));
+    }
+
+    try {
+      catalog()
+          .asTableCatalog()
+          .alterTable(
+              NameIdentifier.of(tablePath.getDatabaseName(), tablePath.getObjectName()),
+              TableChange.rename(newTableName));
+    } catch (NoSuchTableException e) {
+      if (!ignoreIfNotExists) {
+        throw new TableNotExistException(catalogName(), tablePath, e);
+      }
+    } catch (Exception e) {
+      throw new CatalogException(e);
+    }
   }
 
   @Override
-  public void createTable(ObjectPath objectPath, CatalogBaseTable catalogBaseTable, boolean b)
+  public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists)
       throws TableAlreadyExistException, DatabaseNotExistException, CatalogException {
-    throw new UnsupportedOperationException();
+    NameIdentifier identifier =
+        NameIdentifier.of(tablePath.getDatabaseName(), tablePath.getObjectName());
+
+    ResolvedCatalogBaseTable<?> resolvedTable = (ResolvedCatalogBaseTable<?>) table;
+    Column[] columns =
+        resolvedTable.getResolvedSchema().getColumns().stream()
+            .map(this::toGravitinoColumn)
+            .toArray(Column[]::new);
+    String comment = table.getComment();
+    Map<String, String> properties =
+        propertiesConverter.toGravitinoTableProperties(table.getOptions());
+    try {
+      catalog().asTableCatalog().createTable(identifier, columns, comment, properties);
+    } catch (NoSuchSchemaException e) {
+      throw new DatabaseNotExistException(catalogName(), tablePath.getDatabaseName(), e);
+    } catch (TableAlreadyExistsException e) {
+      if (!ignoreIfExists) {
+        throw new TableAlreadyExistException(catalogName(), tablePath, e);
+      }
+    } catch (Exception e) {
+      throw new CatalogException(e);
+    }
   }
 
   @Override
@@ -351,6 +437,31 @@ public void alterPartitionColumnStatistics(
 
   protected abstract PropertiesConverter getPropertiesConverter();
 
+  protected CatalogBaseTable toFlinkTable(Table table) {
+    org.apache.flink.table.api.Schema.Builder builder =
+        org.apache.flink.table.api.Schema.newBuilder();
+    for (Column column : table.columns()) {
+      DataType flinkType = TypeUtils.toFlinkType(column.dataType());
+      builder
+          .column(column.name(), column.nullable() ? flinkType.nullable() : flinkType.notNull())
+          .withComment(column.comment());
+    }
+    Map<String, String> flinkTableProperties =
+        propertiesConverter.toFlinkTableProperties(table.properties());
+    return CatalogTable.of(
+        builder.build(), table.comment(), ImmutableList.of(), flinkTableProperties);
+  }
+
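+  // Map a resolved Flink column onto a Gravitino column: name, type, comment and
+  // nullability carry over; auto-increment is fixed to false and the default value to null.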
+  private Column toGravitinoColumn(org.apache.flink.table.catalog.Column column) {
+    return Column.of(
+        column.getName(),
+        TypeUtils.toGravitinoType(column.getDataType().getLogicalType()),
+        column.getComment().orElse(null),
+        column.getDataType().getLogicalType().isNullable(),
+        false,
+        null);
+  }
+
   @VisibleForTesting
   static SchemaChange[] getSchemaChange(CatalogDatabase current, CatalogDatabase updated) {
     Map<String, String> currentProperties = current.getProperties();
diff --git a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/hive/GravitinoHiveCatalog.java b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/hive/GravitinoHiveCatalog.java
index aae5fb6ff68..857caad7008 100644
--- a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/hive/GravitinoHiveCatalog.java
+++ b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/hive/GravitinoHiveCatalog.java
@@ -22,7 +22,12 @@
 import com.datastrato.gravitino.flink.connector.catalog.BaseCatalog;
 import java.util.Optional;
 import javax.annotation.Nullable;
+import org.apache.flink.table.catalog.ObjectPath;
+import org.apache.flink.table.catalog.exceptions.CatalogException;
+import org.apache.flink.table.catalog.exceptions.TableNotExistException;
 import org.apache.flink.table.catalog.hive.HiveCatalog;
+import org.apache.flink.table.catalog.stats.CatalogColumnStatistics;
+import org.apache.flink.table.catalog.stats.CatalogTableStatistics;
 import org.apache.flink.table.factories.Factory;
 import org.apache.hadoop.hive.conf.HiveConf;
 
@@ -43,6 +48,18 @@ public class GravitinoHiveCatalog extends BaseCatalog {
     this.hiveCatalog = new HiveCatalog(catalogName, defaultDatabase, hiveConf, hiveVersion);
   }
 
+  @Override
+  public void open() throws CatalogException {
+    super.open();
+    hiveCatalog.open();
+  }
+
+  @Override
+  public void close() throws CatalogException {
+    super.close();
+    hiveCatalog.close();
+  }
+
   public HiveConf getHiveConf() {
     return hiveCatalog.getHiveConf();
   }
@@ -56,4 +73,16 @@ public Optional<Factory> getFactory() {
   protected PropertiesConverter getPropertiesConverter() {
     return HivePropertiesConverter.INSTANCE;
   }
+
+  @Override
+  public CatalogTableStatistics getTableStatistics(ObjectPath objectPath)
+      throws TableNotExistException, CatalogException {
+    return hiveCatalog.getTableStatistics(objectPath);
+  }
+
+  @Override
+  public CatalogColumnStatistics getTableColumnStatistics(ObjectPath tablePath)
+      throws TableNotExistException, CatalogException {
+    return hiveCatalog.getTableColumnStatistics(tablePath);
+  }
 }
diff --git a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/hive/HivePropertiesConverter.java b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/hive/HivePropertiesConverter.java
index b780a5e99d8..9a2771ee314 100644
--- a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/hive/HivePropertiesConverter.java
+++ b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/hive/HivePropertiesConverter.java
@@ -20,10 +20,12 @@
 package com.datastrato.gravitino.flink.connector.hive;
 
 import com.datastrato.gravitino.catalog.hive.HiveCatalogPropertiesMeta;
+import com.datastrato.gravitino.catalog.hive.HiveTablePropertiesMetadata;
 import com.datastrato.gravitino.flink.connector.PropertiesConverter;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 import java.util.Map;
+import java.util.stream.Collectors;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.table.catalog.CommonCatalogOptions;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -76,4 +78,25 @@ public Map<String, String> toFlinkCatalogProperties(Map<String, String> gravitin
         });
     return flinkCatalogProperties;
   }
+
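+  // Gravitino stores Hive SERDE properties under a SERDE parameter prefix; strip it so
+  // Flink sees the bare key, then force 'connector' = 'hive' so Flink resolves the Hive
+  // dynamic table factory for this table.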
+  @Override
+  public Map<String, String> toFlinkTableProperties(Map<String, String> gravitinoProperties) {
+    Map<String, String> properties =
+        gravitinoProperties.entrySet().stream()
+            .collect(
+                Collectors.toMap(
+                    entry -> {
+                      String key = entry.getKey();
+                      if (key.startsWith(HiveTablePropertiesMetadata.SERDE_PARAMETER_PREFIX)) {
+                        return key.substring(
+                            HiveTablePropertiesMetadata.SERDE_PARAMETER_PREFIX.length());
+                      } else {
+                        return key;
+                      }
+                    },
+                    Map.Entry::getValue,
+                    (existingValue, newValue) -> newValue));
+    properties.put("connector", "hive");
+    return properties;
+  }
 }
diff --git a/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/utils/TypeUtils.java b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/utils/TypeUtils.java
new file mode 100644
index 00000000000..d6907ebb74d
--- /dev/null
+++ b/flink-connector/src/main/java/com/datastrato/gravitino/flink/connector/utils/TypeUtils.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.datastrato.gravitino.flink.connector.utils;
+
+import com.datastrato.gravitino.rel.types.Type;
+import com.datastrato.gravitino.rel.types.Types;
+import org.apache.flink.table.api.DataTypes;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.LogicalType;
+
+public class TypeUtils {
+
+  private TypeUtils() {}
+
+  public static Type toGravitinoType(LogicalType logicalType) {
+    switch (logicalType.getTypeRoot()) {
+      case VARCHAR:
+        return Types.StringType.get();
+      case DOUBLE:
+        return Types.DoubleType.get();
+      case INTEGER:
+        return Types.IntegerType.get();
+      default:
+        throw new UnsupportedOperationException(
+            "Unsupported Flink type: " + logicalType.asSummaryString());
+    }
+  }
+
+  public static DataType toFlinkType(Type gravitinoType) {
+    switch (gravitinoType.name()) {
+      case DOUBLE:
+        return DataTypes.DOUBLE();
+      case STRING:
+        return DataTypes.STRING();
+      case INTEGER:
+        return DataTypes.INT();
+      default:
+        throw new UnsupportedOperationException("Not support " + gravitinoType.toString());
+    }
+  }
+}
diff --git a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/hive/TestHivePropertiesConverter.java b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/hive/TestHivePropertiesConverter.java
index 397e1220cd0..807db5b3f57 100644
--- a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/hive/TestHivePropertiesConverter.java
+++ b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/hive/TestHivePropertiesConverter.java
@@ -1,4 +1,5 @@
 /*
+ * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
diff --git a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/FlinkCommonIT.java b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/FlinkCommonIT.java
index a304c106c03..a59f29637ee 100644
--- a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/FlinkCommonIT.java
+++ b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/FlinkCommonIT.java
@@ -19,18 +19,31 @@
 
 package com.datastrato.gravitino.flink.connector.integration.test;
 
+import static com.datastrato.gravitino.flink.connector.integration.test.utils.TestUtils.assertColumns;
+import static com.datastrato.gravitino.flink.connector.integration.test.utils.TestUtils.toFlinkPhysicalColumn;
+import static com.datastrato.gravitino.rel.expressions.transforms.Transforms.EMPTY_TRANSFORM;
+
 import com.datastrato.gravitino.Catalog;
+import com.datastrato.gravitino.NameIdentifier;
 import com.datastrato.gravitino.Schema;
 import com.datastrato.gravitino.catalog.hive.HiveSchemaPropertiesMetadata;
 import com.datastrato.gravitino.flink.connector.integration.test.utils.TestUtils;
+import com.datastrato.gravitino.rel.Column;
+import com.datastrato.gravitino.rel.Table;
+import com.datastrato.gravitino.rel.types.Types;
+import com.google.common.collect.ImmutableMap;
+import java.util.Optional;
+import org.apache.flink.table.api.DataTypes;
 import org.apache.flink.table.api.ResultKind;
 import org.apache.flink.table.api.TableResult;
+import org.apache.flink.table.catalog.CatalogBaseTable;
+import org.apache.flink.table.catalog.CatalogTable;
+import org.apache.flink.table.catalog.ObjectPath;
+import org.apache.flink.table.catalog.exceptions.TableNotExistException;
 import org.apache.flink.types.Row;
 import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
 
-@Tag("gravitino-docker-it")
 public abstract class FlinkCommonIT extends FlinkEnvIT {
 
   protected abstract Catalog currentCatalog();
@@ -163,4 +176,158 @@ public void testAlterSchema() {
           }
         });
   }
+
+  @Test
+  public void testCreateSimpleTable() {
+    String databaseName = "test_create_no_partition_table_db";
+    String tableName = "test_create_no_partition_table";
+    String comment = "test comment";
+    String key = "test key";
+    String value = "test value";
+
+    doWithSchema(
+        currentCatalog(),
+        databaseName,
+        catalog -> {
+          TableResult result =
+              sql(
+                  "CREATE TABLE %s "
+                      + "(string_type STRING COMMENT 'string_type', "
+                      + " double_type DOUBLE COMMENT 'double_type')"
+                      + " COMMENT '%s' WITH ("
+                      + "'%s' = '%s')",
+                  tableName, comment, key, value);
+          TestUtils.assertTableResult(result, ResultKind.SUCCESS);
+
+          Table table =
+              catalog.asTableCatalog().loadTable(NameIdentifier.of(databaseName, tableName));
+          Assertions.assertNotNull(table);
+          Assertions.assertEquals(comment, table.comment());
+          Assertions.assertEquals(value, table.properties().get(key));
+          Column[] columns =
+              new Column[] {
+                Column.of("string_type", Types.StringType.get(), "string_type", true, false, null),
+                Column.of("double_type", Types.DoubleType.get(), "double_type")
+              };
+          assertColumns(columns, table.columns());
+          Assertions.assertArrayEquals(EMPTY_TRANSFORM, table.partitioning());
+
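+          // Flink reports -1 as the affected-row count when the sink cannot determine it exactly.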
+          TestUtils.assertTableResult(
+              sql("INSERT INTO %s VALUES ('A', 1.0), ('B', 2.0)", tableName),
+              ResultKind.SUCCESS_WITH_CONTENT,
+              Row.of(-1L));
+          TestUtils.assertTableResult(
+              sql("SELECT * FROM %s", tableName),
+              ResultKind.SUCCESS_WITH_CONTENT,
+              Row.of("A", 1.0),
+              Row.of("B", 2.0));
+        },
+        true);
+  }
+
+  @Test
+  public void testListTables() {
+    String newSchema = "test_list_table_catalog";
+    Column[] columns = new Column[] {Column.of("user_id", Types.IntegerType.get(), "USER_ID")};
+    doWithSchema(
+        currentCatalog(),
+        newSchema,
+        catalog -> {
+          catalog
+              .asTableCatalog()
+              .createTable(
+                  NameIdentifier.of(newSchema, "test_table1"),
+                  columns,
+                  "comment1",
+                  ImmutableMap.of());
+          catalog
+              .asTableCatalog()
+              .createTable(
+                  NameIdentifier.of(newSchema, "test_table2"),
+                  columns,
+                  "comment2",
+                  ImmutableMap.of());
+          TableResult result = sql("SHOW TABLES");
+          TestUtils.assertTableResult(
+              result,
+              ResultKind.SUCCESS_WITH_CONTENT,
+              Row.of("test_table1"),
+              Row.of("test_table2"));
+        },
+        true);
+  }
+
+  @Test
+  public void testDropTable() {
+    String databaseName = "test_drop_table_db";
+    doWithSchema(
+        currentCatalog(),
+        databaseName,
+        catalog -> {
+          String tableName = "test_drop_table";
+          Column[] columns =
+              new Column[] {Column.of("user_id", Types.IntegerType.get(), "USER_ID")};
+          NameIdentifier identifier = NameIdentifier.of(databaseName, tableName);
+          catalog.asTableCatalog().createTable(identifier, columns, "comment1", ImmutableMap.of());
+          Assertions.assertTrue(catalog.asTableCatalog().tableExists(identifier));
+
+          TableResult result = sql("DROP TABLE %s", tableName);
+          TestUtils.assertTableResult(result, ResultKind.SUCCESS);
+          Assertions.assertFalse(catalog.asTableCatalog().tableExists(identifier));
+        },
+        true);
+  }
+
+  @Test
+  public void testGetSimpleTable() {
+    String databaseName = "test_get_simple_table";
+    Column[] columns =
+        new Column[] {
+          Column.of("string_type", Types.StringType.get(), "string_type", true, false, null),
+          Column.of("double_type", Types.DoubleType.get(), "double_type")
+        };
+
+    doWithSchema(
+        currentCatalog(),
+        databaseName,
+        catalog -> {
+          String tableName = "test_desc_table";
+          String comment = "comment1";
+          catalog
+              .asTableCatalog()
+              .createTable(
+                  NameIdentifier.of(databaseName, "test_desc_table"),
+                  columns,
+                  comment,
+                  ImmutableMap.of("k1", "v1"));
+
+          Optional<org.apache.flink.table.catalog.Catalog> flinkCatalog =
+              tableEnv.getCatalog(catalog.name());
+          Assertions.assertTrue(flinkCatalog.isPresent());
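+          // Look the table up through the Flink Catalog API to verify the Gravitino-to-Flink
+          // schema mapping.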
+          try {
+            CatalogBaseTable table =
+                flinkCatalog.get().getTable(new ObjectPath(databaseName, tableName));
+            Assertions.assertNotNull(table);
+            Assertions.assertEquals(CatalogBaseTable.TableKind.TABLE, table.getTableKind());
+            Assertions.assertEquals(comment, table.getComment());
+
+            org.apache.flink.table.catalog.Column[] expected =
+                new org.apache.flink.table.catalog.Column[] {
+                  org.apache.flink.table.catalog.Column.physical("string_type", DataTypes.STRING())
+                      .withComment("string_type"),
+                  org.apache.flink.table.catalog.Column.physical("double_type", DataTypes.DOUBLE())
+                      .withComment("double_type")
+                };
+            org.apache.flink.table.catalog.Column[] actual =
+                toFlinkPhysicalColumn(table.getUnresolvedSchema().getColumns());
+            Assertions.assertArrayEquals(expected, actual);
+
+            CatalogTable catalogTable = (CatalogTable) table;
+            Assertions.assertFalse(catalogTable.isPartitioned());
+          } catch (TableNotExistException e) {
+            Assertions.fail(e);
+          }
+        },
+        true);
+  }
 }
diff --git a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/FlinkEnvIT.java b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/FlinkEnvIT.java
index 13ef566608f..fc574ffaebc 100644
--- a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/FlinkEnvIT.java
+++ b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/FlinkEnvIT.java
@@ -1,4 +1,5 @@
 /*
+ * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
@@ -25,12 +26,14 @@
 import com.datastrato.gravitino.integration.test.container.HiveContainer;
 import com.datastrato.gravitino.integration.test.util.AbstractIT;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
 import com.google.errorprone.annotations.FormatMethod;
 import com.google.errorprone.annotations.FormatString;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.function.Consumer;
 import org.apache.flink.configuration.Configuration;
+import org.apache.flink.table.api.EnvironmentSettings;
 import org.apache.flink.table.api.TableEnvironment;
 import org.apache.flink.table.api.TableResult;
 import org.apache.flink.table.api.internal.TableEnvironmentImpl;
@@ -125,7 +128,9 @@ private static void initFlinkEnv() {
         "table.catalog-store.kind", GravitinoCatalogStoreFactoryOptions.GRAVITINO);
     configuration.setString("table.catalog-store.gravitino.gravitino.metalake", GRAVITINO_METALAKE);
     configuration.setString("table.catalog-store.gravitino.gravitino.uri", gravitinoUri);
-    tableEnv = TableEnvironment.create(configuration);
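+    // Build the environment in batch mode so the SQL statements issued by the tests run as
+    // finite batch jobs.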
+    EnvironmentSettings.Builder builder =
+        EnvironmentSettings.newInstance().withConfiguration(configuration);
+    tableEnv = TableEnvironment.create(builder.inBatchMode().build());
   }
 
   private static void stopHdfsEnv() {
@@ -154,6 +159,27 @@ protected TableResult sql(@FormatString String sql, Object... args) {
     return tableEnv.executeSql(String.format(sql, args));
   }
 
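+  /**
+   * Switches to the given catalog, creates the schema (located under the warehouse directory) if
+   * it does not exist, runs the action with that schema as the current database, and drops the
+   * schema with cascade when dropSchema is true.
+   */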
+  protected static void doWithSchema(
+      Catalog catalog, String schemaName, Consumer<Catalog> action, boolean dropSchema) {
+    Preconditions.checkNotNull(catalog);
+    Preconditions.checkNotNull(schemaName);
+    try {
+      tableEnv.useCatalog(catalog.name());
+      if (!catalog.asSchemas().schemaExists(schemaName)) {
+        catalog
+            .asSchemas()
+            .createSchema(
+                schemaName, null, ImmutableMap.of("location", warehouse + "/" + schemaName));
+      }
+      tableEnv.useDatabase(schemaName);
+      action.accept(catalog);
+    } finally {
+      if (dropSchema) {
+        catalog.asSchemas().dropSchema(schemaName, true);
+      }
+    }
+  }
+
   protected static void doWithCatalog(Catalog catalog, Consumer<Catalog> action) {
     Preconditions.checkNotNull(catalog);
     tableEnv.useCatalog(catalog.name());
diff --git a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java
index b85c0d3f673..4db72cc9e53 100644
--- a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java
+++ b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java
@@ -1,4 +1,5 @@
 /*
+ * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
@@ -23,6 +24,7 @@
 import com.datastrato.gravitino.flink.connector.hive.GravitinoHiveCatalog;
 import com.datastrato.gravitino.flink.connector.hive.GravitinoHiveCatalogFactoryOptions;
 import com.datastrato.gravitino.flink.connector.integration.test.FlinkCommonIT;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import java.util.Arrays;
@@ -42,28 +44,37 @@
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
 
 @Tag("gravitino-docker-test")
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
 public class FlinkHiveCatalogIT extends FlinkCommonIT {
+  private static final String DEFAULT_HIVE_CATALOG = "test_flink_hive_schema_catalog";
 
   private static com.datastrato.gravitino.Catalog hiveCatalog;
 
   @BeforeAll
   static void hiveStartUp() {
+    initDefaultHiveCatalog();
+  }
+
+  @AfterAll
+  static void hiveStop() {
+    Preconditions.checkNotNull(metalake);
+    metalake.dropCatalog(DEFAULT_HIVE_CATALOG);
+  }
+
+  protected static void initDefaultHiveCatalog() {
+    Preconditions.checkNotNull(metalake);
     hiveCatalog =
         metalake.createCatalog(
-            "test_flink_hive_schema_catalog",
+            DEFAULT_HIVE_CATALOG,
             com.datastrato.gravitino.Catalog.Type.RELATIONAL,
             "hive",
             null,
             ImmutableMap.of("metastore.uris", hiveMetastoreUri));
   }
 
-  @AfterAll
-  static void hiveStop() {
-    metalake.dropCatalog("test_flink_hive_schema_catalog");
-  }
-
   @Test
   public void testCreateGravitinoHiveCatalog() {
     tableEnv.useCatalog(DEFAULT_CATALOG);
@@ -75,8 +86,7 @@ public void testCreateGravitinoHiveCatalog() {
     configuration.set(
         CommonCatalogOptions.CATALOG_TYPE, GravitinoHiveCatalogFactoryOptions.IDENTIFIER);
     configuration.set(HiveCatalogFactoryOptions.HIVE_CONF_DIR, "src/test/resources/flink-tests");
-    configuration.set(
-        GravitinoHiveCatalogFactoryOptions.HIVE_METASTORE_URIS, "thrift://127.0.0.1:9084");
+    configuration.set(GravitinoHiveCatalogFactoryOptions.HIVE_METASTORE_URIS, hiveMetastoreUri);
     CatalogDescriptor catalogDescriptor = CatalogDescriptor.of(catalogName, configuration);
     tableEnv.createCatalog(catalogName, catalogDescriptor);
     Assertions.assertTrue(metalake.catalogExists(catalogName));
@@ -84,7 +94,7 @@ public void testCreateGravitinoHiveCatalog() {
     // Check the catalog properties.
     com.datastrato.gravitino.Catalog gravitinoCatalog = metalake.loadCatalog(catalogName);
     Map<String, String> properties = gravitinoCatalog.properties();
-    Assertions.assertEquals("thrift://127.0.0.1:9084", properties.get(METASTORE_URIS));
+    Assertions.assertEquals(hiveMetastoreUri, properties.get(METASTORE_URIS));
     Map<String, String> flinkProperties =
         gravitinoCatalog.properties().entrySet().stream()
             .filter(e -> e.getKey().startsWith(PropertiesConverter.FLINK_PROPERTY_PREFIX))
@@ -141,16 +151,16 @@ public void testCreateGravitinoHiveCatalogUsingSQL() {
             "create catalog %s with ("
                 + "'type'='gravitino-hive', "
                 + "'hive-conf-dir'='src/test/resources/flink-tests',"
-                + "'hive.metastore.uris'='thrift://127.0.0.1:9084',"
+                + "'hive.metastore.uris'='%s',"
                 + "'unknown.key'='unknown.value'"
                 + ")",
-            catalogName));
+            catalogName, hiveMetastoreUri));
     Assertions.assertTrue(metalake.catalogExists(catalogName));
 
     // Check the properties of the created catalog.
     com.datastrato.gravitino.Catalog gravitinoCatalog = metalake.loadCatalog(catalogName);
     Map<String, String> properties = gravitinoCatalog.properties();
-    Assertions.assertEquals("thrift://127.0.0.1:9084", properties.get(METASTORE_URIS));
+    Assertions.assertEquals(hiveMetastoreUri, properties.get(METASTORE_URIS));
     Map<String, String> flinkProperties =
         properties.entrySet().stream()
             .filter(e -> e.getKey().startsWith(PropertiesConverter.FLINK_PROPERTY_PREFIX))
@@ -245,7 +255,7 @@ public void testGetCatalogFromGravitino() {
                 "flink.bypass.hive.test",
                 "hive.config",
                 "metastore.uris",
-                "thrift://127.0.0.1:9084"));
+                hiveMetastoreUri));
     Assertions.assertNotNull(gravitinoCatalog);
     Assertions.assertEquals(catalogName, gravitinoCatalog.name());
     Assertions.assertTrue(metalake.catalogExists(catalogName));
@@ -261,7 +271,7 @@ public void testGetCatalogFromGravitino() {
     Assertions.assertTrue(hiveConf.size() > 0, "Should have hive conf");
     Assertions.assertEquals("hive.config", hiveConf.get("hive.test"));
     Assertions.assertEquals(
-        "thrift://127.0.0.1:9084", hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));
+        hiveMetastoreUri, hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));
 
     // drop the catalog.
     tableEnv.useCatalog(DEFAULT_CATALOG);
diff --git a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/utils/TestUtils.java b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/utils/TestUtils.java
index 08a2f6b1ccf..e08b1cd7ccb 100644
--- a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/utils/TestUtils.java
+++ b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/integration/test/utils/TestUtils.java
@@ -1,4 +1,5 @@
 /*
+ * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
@@ -17,10 +18,13 @@
  */
 package com.datastrato.gravitino.flink.connector.integration.test.utils;
 
+import com.datastrato.gravitino.rel.Column;
 import com.google.common.collect.Lists;
 import java.util.List;
 import org.apache.flink.table.api.ResultKind;
+import org.apache.flink.table.api.Schema;
 import org.apache.flink.table.api.TableResult;
+import org.apache.flink.table.types.DataType;
 import org.apache.flink.types.Row;
 import org.junit.jupiter.api.Assertions;
 
@@ -42,4 +46,32 @@ public static void assertTableResult(
       }
     }
   }
+
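+  /**
+   * Asserts that the expected and actual column arrays match on name, comment, data type,
+   * default value, auto-increment flag, and nullability.
+   */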
+  public static void assertColumns(Column[] expected, Column[] actual) {
+    Assertions.assertEquals(expected.length, actual.length);
+    for (int i = 0; i < expected.length; i++) {
+      Assertions.assertEquals(expected[i].name(), actual[i].name());
+      Assertions.assertEquals(expected[i].comment(), actual[i].comment());
+      Assertions.assertEquals(
+          expected[i].dataType().simpleString(), actual[i].dataType().simpleString());
+      Assertions.assertEquals(expected[i].defaultValue(), actual[i].defaultValue());
+      Assertions.assertEquals(expected[i].autoIncrement(), actual[i].autoIncrement());
+      Assertions.assertEquals(expected[i].nullable(), actual[i].nullable());
+    }
+  }
+
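+  /**
+   * Converts Flink unresolved schema columns into physical catalog columns, carrying over the
+   * name, data type, and optional comment so they can be compared in assertions.
+   */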
+  public static org.apache.flink.table.catalog.Column[] toFlinkPhysicalColumn(
+      List<Schema.UnresolvedColumn> unresolvedPhysicalColumns) {
+    return unresolvedPhysicalColumns.stream()
+        .map(
+            column -> {
+              Schema.UnresolvedPhysicalColumn unresolvedPhysicalColumn =
+                  (Schema.UnresolvedPhysicalColumn) column;
+              return org.apache.flink.table.catalog.Column.physical(
+                      unresolvedPhysicalColumn.getName(),
+                      (DataType) unresolvedPhysicalColumn.getDataType())
+                  .withComment(unresolvedPhysicalColumn.getComment().orElse(null));
+            })
+        .toArray(org.apache.flink.table.catalog.Column[]::new);
+  }
 }
diff --git a/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/utils/TestTypeUtils.java b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/utils/TestTypeUtils.java
new file mode 100644
index 00000000000..1a381d23201
--- /dev/null
+++ b/flink-connector/src/test/java/com/datastrato/gravitino/flink/connector/utils/TestTypeUtils.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.datastrato.gravitino.flink.connector.utils;
+
+import com.datastrato.gravitino.rel.types.Types;
+import org.apache.flink.table.api.DataTypes;
+import org.apache.flink.table.catalog.UnresolvedIdentifier;
+import org.apache.flink.table.types.logical.DoubleType;
+import org.apache.flink.table.types.logical.IntType;
+import org.apache.flink.table.types.logical.UnresolvedUserDefinedType;
+import org.apache.flink.table.types.logical.VarCharType;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
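+/** Unit tests for the TypeUtils conversions between Flink logical types and Gravitino types. */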
+public class TestTypeUtils {
+
+  @Test
+  public void testToGravitinoType() {
+    Assertions.assertEquals(
+        Types.StringType.get(), TypeUtils.toGravitinoType(new VarCharType(Integer.MAX_VALUE)));
+    Assertions.assertEquals(Types.DoubleType.get(), TypeUtils.toGravitinoType(new DoubleType()));
+    Assertions.assertEquals(Types.IntegerType.get(), TypeUtils.toGravitinoType(new IntType()));
+    Assertions.assertThrows(
+        UnsupportedOperationException.class,
+        () ->
+            TypeUtils.toGravitinoType(
+                new UnresolvedUserDefinedType(UnresolvedIdentifier.of("a", "b", "c"))));
+  }
+
+  @Test
+  public void testToFlinkType() {
+    Assertions.assertEquals(DataTypes.DOUBLE(), TypeUtils.toFlinkType(Types.DoubleType.get()));
+    Assertions.assertEquals(DataTypes.STRING(), TypeUtils.toFlinkType(Types.StringType.get()));
+    Assertions.assertEquals(DataTypes.INT(), TypeUtils.toFlinkType(Types.IntegerType.get()));
+    Assertions.assertThrows(
+        UnsupportedOperationException.class,
+        () -> TypeUtils.toFlinkType(Types.UnparsedType.of("unknown")));
+  }
+}

From 34a81df3f4c47ea50fd64ed5eb413a6187178240 Mon Sep 17 00:00:00 2001
From: Justin Mclean 
Date: Fri, 5 Jul 2024 16:36:19 +1000
Subject: [PATCH 12/12] [#4064] Fix GitHub and resources (#4070)

### What changes were proposed in this pull request?

Change GitHub links and other resources to ASF ones. The Docker images and the
documentation URL still need to be fixed, but this can be merged now.

### Why are the changes needed?

As we are now an ASF project.

Fix: #4064

### Does this PR introduce _any_ user-facing change?

No

### How was this patch tested?

Built locally with no issues.
---
 CONTRIBUTING.md                                   |  6 +++---
 build.gradle.kts                                  |  6 +++---
 clients/client-python/setup.py                    |  2 +-
 docs/docker-image-details.md                      |  2 +-
 docs/getting-started.md                           | 13 ++++++-------
 docs/hadoop-catalog.md                            |  6 +++---
 docs/how-to-build.md                              |  4 ++--
 docs/how-to-install.md                            |  4 ++--
 docs/how-to-use-gvfs.md                           |  4 ++--
 docs/how-to-use-python-client.md                  | 15 ++++++---------
 docs/how-to-use-the-playground.md                 |  2 +-
 docs/index.md                                     |  4 ++--
 docs/manage-table-partition-using-gravitino.md    |  4 ++--
 docs/publish-docker-images.md                     |  2 +-
 docs/trino-connector/installation.md              |  4 ++--
 .../gravitino/server/web/JettyServer.java         |  2 +-
 web/src/app/rootLayout/Footer.js                  |  4 ++--
 17 files changed, 40 insertions(+), 44 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c4c83b6f669..89a63d79db4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -55,13 +55,13 @@ Before you get started, please read and follow these guidelines to ensure a smoo
 Either click the "Fork" button at the top right of the repository's page on GitHub OR create a fork on your local machine using `git clone`.
 
 ```bash
-git clone https://github.com/datastrato/gravitino.git
+git clone https://github.com/apache/gravitino.git
 cd gravitino
 ```
 
 ### Development Setup
 
-Once you have cloned the [GitHub repository](https://github.com/datastrato/gravitino), see [how to build](/docs/how-to-build.md) for instructions on how to build, or you can use the provided docker images at [Datastrato's DockerHub repository](https://hub.docker.com/u/datastrato).
+Once you have cloned the [GitHub repository](https://github.com/apache/gravitino), see [how to build](/docs/how-to-build.md) for instructions on how to build, or you can use the provided docker images at [Datastrato's DockerHub repository](https://hub.docker.com/u/datastrato).
 
 To stop and start a local Gravitino server via `bin/gravitino.sh start` and `bin/gravitino.sh stop` in a Gravitino distribution, see [how to build](/docs/how-to-build.md) for more instructions.
 
@@ -225,7 +225,7 @@ If you have ideas for enhancements or new features, feel free to create an issue
 
 ### Good First Issues
 
-If you are new to open source or can't find something to work on, check out the [Good First Issues list](https://github.com/datastrato/gravitino/contribute).
+If you are new to open source or can't find something to work on, check out the [Good First Issues list](https://github.com/apache/gravitino/contribute).
 
 ### Working on Issues
 
diff --git a/build.gradle.kts b/build.gradle.kts
index abba4ee2a98..51d1f217522 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -384,12 +384,12 @@ subprojects {
             developer {
               id.set("The maintainers of Gravitino")
               name.set("support")
-              email.set("support@datastrato.com")
+              email.set("dev@datastrato.com")
             }
           }
           scm {
-            url.set("https://github.com/datastrato/gravitino")
-            connection.set("scm:git:git://github.com/datastrato/gravitino.git")
+            url.set("https://github.com/apache/gravitino")
+            connection.set("scm:git:git://github.com/apache/gravitino.git")
           }
         }
       }
diff --git a/clients/client-python/setup.py b/clients/client-python/setup.py
index d812e593cc1..48e8af03127 100644
--- a/clients/client-python/setup.py
+++ b/clients/client-python/setup.py
@@ -33,7 +33,7 @@
     version="0.6.0.dev1",
     long_description=long_description,
     long_description_content_type="text/markdown",
-    url="https://github.com/datastrato/gravitino",
+    url="https://github.com/apache/gravitino",
     author="datastrato",
     author_email="support@datastrato.com",
     python_requires=">=3.8",
diff --git a/docs/docker-image-details.md b/docs/docker-image-details.md
index f7b30a2d6f8..bd902707a08 100644
--- a/docs/docker-image-details.md
+++ b/docs/docker-image-details.md
@@ -46,7 +46,7 @@ Changelog
 
 ## Playground Docker image
 
-You can use the [playground](https://github.com/datastrato/gravitino-playground) to experience the whole Gravitino system with other components.
+You can use the [playground](https://github.com/apache/gravitino-playground) to experience the whole Gravitino system with other components.
 
 The playground consists of multiple Docker images.
 
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 586962baf0e..01220ae675b 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -223,7 +223,7 @@ newgrp docker
 ```
 
 You can install and run all the programs as Docker containers by using the
-[gravitino-playground](https://github.com/datastrato/gravitino-playground). For details about
+[gravitino-playground](https://github.com/apache/gravitino-playground). For details about
 how to run the playground, see [how-to-use-the-playground](./how-to-use-the-playground.md)
 
 ## Installing Apache Gravitino playground locally
@@ -321,17 +321,16 @@ After completing these steps, you should be able to access the Gravitino REST in
 
 2. **Community engagement:**
    - Join the Gravitino community forums to connect with other users, share experiences, and seek assistance if needed.
-   - Check out our GitHub repository: https://github.com/datastrato
-   - Check out our Discourse group: https://gravitino.discourse.group/
+   - Check out our GitHub repository: https://github.com/apache/gravitino
+   - Check out our Slack channel in ASF Slack: https://the-asf.slack.com
    
 3. **Read our blogs:**
-   - Check out: https://datastrato.ai/blog
+   - Check out: https://gravitino.apache.org/blog (coming soon)
 
 4. **Continuous updates:**
    - Stay informed about Gravitino updates and new releases to benefit from the latest features, optimizations, and security       
      enhancements.
-   - Check out our Website: https://datastrato.ai/
-   - Check out our page on X (formerly Twitter): https://twitter.com/datastrato
+   - Check out our Website: https://gravitino.apache.org (coming soon)
   
 
-This document is just the beginning. You're welcome to customize your Gravitino setup based on your requirements and to explore the vast possibilities this powerful tool offers. If you encounter any issues or have questions, you can always connect with the Gravitino community for assistance. 
+This document is just the beginning. You're welcome to customize your Gravitino setup based on your requirements and to explore the vast possibilities this powerful tool offers. If you encounter any issues or have questions, you can always connect with the Gravitino community for assistance.
diff --git a/docs/hadoop-catalog.md b/docs/hadoop-catalog.md
index 0cf404ee3cc..1cfb95943bd 100644
--- a/docs/hadoop-catalog.md
+++ b/docs/hadoop-catalog.md
@@ -12,12 +12,12 @@ Hadoop catalog is a fileset catalog that uses Hadoop Compatible File System (HC
 the storage location of the fileset. Currently, it supports local filesystem and HDFS. For
 object storage like S3, GCS, and Azure Blob Storage, you can put the hadoop object store jar like
 hadoop-aws into the `$GRAVITINO_HOME/catalogs/hadoop/libs` directory to enable the support.
-Apache Gravitino itself hasn't yet tested the object storage support, so if you have any issue,
-please create an [issue](https://github.com/datastrato/gravitino/issues).
+Gravitino itself hasn't yet tested the object storage support, so if you have any issue,
+please create an [issue](https://github.com/apache/gravitino/issues).
 
 Note that Gravitino uses Hadoop 3 dependencies to build Hadoop catalog. Theoretically, it should be
 compatible with both Hadoop 2.x and 3.x, since Gravitino doesn't leverage any new features in
-Hadoop 3. If there's any compatibility issue, please create an [issue](https://github.com/datastrato/gravitino/issues).
+Hadoop 3. If there's any compatibility issue, please create an [issue](https://github.com/apache/gravitino/issues).
 
 ## Catalog
 
diff --git a/docs/how-to-build.md b/docs/how-to-build.md
index 172bd1d287e..f9520640330 100644
--- a/docs/how-to-build.md
+++ b/docs/how-to-build.md
@@ -43,7 +43,7 @@ license: "This software is licensed under the Apache License version 2."
 If you want to contribute to this open-source project, please fork the project on GitHub first. After forking, clone the forked project to your local environment, make your changes, and submit a pull request (PR).
 
     ```shell
-    git clone git@github.com:datastrato/gravitino.git
+    git clone git@github.com:apache/gravitino.git
     ```
 
 2. Build the Gravitino project.
@@ -248,7 +248,7 @@ These commands add a repository that provides the latest Python versions and the
 **On Ubuntu (WSL):**
 
 ```shell
-git clone https://github.com/datastrato/gravitino.git
+git clone https://github.com/apache/gravitino.git
 cd gravitino
 ./gradlew compileDistribution -x test
 cd distribution/package/
diff --git a/docs/how-to-install.md b/docs/how-to-install.md
index 29994d6ecbd..5d9d8222805 100644
--- a/docs/how-to-install.md
+++ b/docs/how-to-install.md
@@ -15,7 +15,7 @@ Apache Gravitino supports running on Java 8, 11, and 17. Make sure you have Java
 ### Get the Apache Gravitino binary distribution package
 
 Before installing Gravitino, make sure you have the Gravitino binary distribution package. You can
-download the latest Gravitino binary distribution package from [GitHub](https://github.com/datastrato/gravitino/releases),
+download the latest Gravitino binary distribution package from [GitHub](https://github.com/apache/gravitino/releases),
 or you can build it yourself by following the instructions in [How to Build Gravitino](./how-to-build.md).
 
   - If you build Gravitino yourself using the `./gradlew compileDistribution` command, you can find the
@@ -152,5 +152,5 @@ you want to experience the whole Gravitino system with other components, use the
 `compose` file.
 
 For the details, review the
-[Gravitino playground repository](https://github.com/datastrato/gravitino-playground) and
+[Gravitino playground repository](https://github.com/apache/gravitino-playground) and
 [playground example](./how-to-use-the-playground.md).
diff --git a/docs/how-to-use-gvfs.md b/docs/how-to-use-gvfs.md
index 274834b27fd..654c9038748 100644
--- a/docs/how-to-use-gvfs.md
+++ b/docs/how-to-use-gvfs.md
@@ -34,7 +34,7 @@ the path mapping and convert automatically.
 
 + A Hadoop environment with HDFS running. GVFS has been tested against
   Hadoop 3.1.0. It is recommended to use Hadoop 3.1.0 or later, but it should work with Hadoop 2.
-  x. Please create an [issue](https://www.github.com/datastrato/gravitino/issues) if you find any
+  x. Please create an [issue](https://www.github.com/apache/gravitino/issues) if you find any
   compatibility issues.
 
 ## Configuration
@@ -104,7 +104,7 @@ two ways:
 
 2. Compile from the source code:
 
-   Download or clone the [Gravitino source code](https://github.com/datastrato/gravitino), and compile it
+   Download or clone the [Gravitino source code](https://github.com/apache/gravitino), and compile it
    locally using the following command in the Gravitino source code directory:
 
     ```shell
diff --git a/docs/how-to-use-python-client.md b/docs/how-to-use-python-client.md
index d33cd8de990..6531cd97b29 100644
--- a/docs/how-to-use-python-client.md
+++ b/docs/how-to-use-python-client.md
@@ -42,7 +42,7 @@ to launch a Gravitino server, HDFS and Jupyter notebook environment in your local
 Once the playground Docker environment has started, you can directly open
 `http://localhost:8888/lab/tree/gravitino-fileset-example.ipynb` in the browser and run the example.
 
-The [gravitino-fileset-example](https://github.com/datastrato/gravitino-playground/blob/main/init/jupyter/gravitino-fileset-example.ipynb)
+The [gravitino-fileset-example](https://github.com/apache/gravitino-playground/blob/main/init/jupyter/gravitino-fileset-example.ipynb)
 contains the following code snippets:
 
 1. Install HDFS Python client.
@@ -78,7 +78,7 @@ You can use any IDE to develop Gravitino Python Client. Directly open the client
 1. Clone the Gravitino project.
 
     ```shell
-    git clone git@github.com:datastrato/gravitino.git
+    git clone git@github.com:apache/gravitino.git
     ```
 
 2. Build the Gravitino Python client module
@@ -119,12 +119,9 @@ You can use any IDE to develop Gravitino Python Client. Directly open the client
 
 ## Resources
 
-+ Official website https://datastrato.ai/
-+ Project home on GitHub: https://github.com/datastrato/gravitino/
-+ Playground with Docker: https://github.com/datastrato/gravitino-playground
++ Official website https://gravitino.apache.org/ (coming soon)
++ Project home on GitHub: https://github.com/apache/gravitino/
++ Playground with Docker: https://github.com/apache/gravitino-playground
 + User documentation: https://datastrato.ai/docs/
 + Videos on Youtube: https://www.youtube.com/@Datastrato
-+ Twitter: https://twitter.com/datastrato
-+ Linkedin: https://www.linkedin.com/company/datastrato
-+ Slack Community: [https://join.slack.com/t/datastrato-community](https://join.slack.com/t/datastrato-community/shared_invite/zt-2a8vsjoch-cU_uUwHA_QU6Ab50thoq8w)
-+ Discourse Community: https://gravitino.discourse.group/
++ Slack Community: [https://the-asf.slack.com](https://the-asf.slack.com)
diff --git a/docs/how-to-use-the-playground.md b/docs/how-to-use-the-playground.md
index a5be2677c5c..82003af39e2 100644
--- a/docs/how-to-use-the-playground.md
+++ b/docs/how-to-use-the-playground.md
@@ -76,7 +76,7 @@ to launch a Gravitino server, HDFS and Jupyter notebook environment in your local
 Once the playground Docker environment has started, you can directly open
 `http://localhost:8888/lab/tree/gravitino-fileset-example.ipynb` in the browser and run the example.
 
-The [gravitino-fileset-example](https://github.com/datastrato/gravitino-playground/blob/main/init/jupyter/gravitino-fileset-example.ipynb)
+The [gravitino-fileset-example](https://github.com/apache/gravitino-playground/blob/main/init/jupyter/gravitino-fileset-example.ipynb)
 contains the following code snippets:
 
 1. Install HDFS Python client.
diff --git a/docs/index.md b/docs/index.md
index 7a006afe9c1..71a48ab3a2d 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -14,7 +14,7 @@ metadata access for data and AI assets.
 
 ## Downloading
 
-You can get Gravitino from the [GitHub release page](https://github.com/datastrato/gravitino/releases),
+You can get Gravitino from the [GitHub release page](https://github.com/apache/gravitino/releases),
 or you can build Gravitino from source code. See [How to build Gravitino](./how-to-build.md).
 
 Gravitino runs on both Linux and macOS platforms, and it requires the installation of Java 8, Java 11, or Java 17. Gravitino trino-connector runs with
@@ -29,7 +29,7 @@ Pull the image and run it. For details of the Gravitino Docker image, see
 [Docker image details](./docker-image-details.md).
 
 Gravitino also provides a playground to experience the whole Gravitino system with other components.
-See the [Gravitino playground repository](https://github.com/datastrato/gravitino-playground)
+See the [Gravitino playground repository](https://github.com/apache/gravitino-playground)
 and [How to use the playground](./how-to-use-the-playground.md).
 
 ## Getting started
diff --git a/docs/manage-table-partition-using-gravitino.md b/docs/manage-table-partition-using-gravitino.md
index 7dac4f9a498..4d0886e782e 100644
--- a/docs/manage-table-partition-using-gravitino.md
+++ b/docs/manage-table-partition-using-gravitino.md
@@ -25,10 +25,10 @@ The following table shows the partition operations supported across various cata
 | Get Partition by Name | ✔     | ✘                                                                      | ✘           | ✘                |
 | List Partition Names  | ✔     | ✘                                                                      | ✘           | ✘                |
 | List Partitions       | ✔     | ✘                                                                      | ✘           | ✘                |
-| Drop Partition        | ✔     | 🚀([Coming Soon](https://github.com/datastrato/gravitino/issues/1655)) | ✘           | ✘                |
+| Drop Partition        | ✔     | 🚀([Coming Soon](https://github.com/apache/gravitino/issues/1655)) | ✘           | ✘                |
 
 :::tip[WELCOME FEEDBACK]
-If you need additional partition management support for a specific catalog, please feel free to [create an issue](https://github.com/datastrato/gravitino/issues/new/choose) on the [Gravitino repository](https://github.com/datastrato/gravitino).
+If you need additional partition management support for a specific catalog, please feel free to [create an issue](https://github.com/apache/gravitino/issues/new/choose) on the [Gravitino repository](https://github.com/apache/gravitino).
 :::
 
 ## Partition operations
diff --git a/docs/publish-docker-images.md b/docs/publish-docker-images.md
index a1923f92baa..da37aeee4ae 100644
--- a/docs/publish-docker-images.md
+++ b/docs/publish-docker-images.md
@@ -15,7 +15,7 @@ The Apache Gravitino project provides a set of Docker images to facilitate the p
 
 You can use GitHub actions to publish Docker images to the Docker Hub repository.
 
-1. Open the [Docker publish link](https://github.com/datastrato/gravitino/actions/workflows/docker-image.yml)
+1. Open the [Docker publish link](https://github.com/apache/gravitino/actions/workflows/docker-image.yml)
 2. Click the `Run workflow` button.
 3. Select the branch you want to build
    + Selecting the main branch results in publishing the Docker image with the specified tag and the latest tag.
diff --git a/docs/trino-connector/installation.md b/docs/trino-connector/installation.md
index a946f12b5fe..f7fc1b75f13 100644
--- a/docs/trino-connector/installation.md
+++ b/docs/trino-connector/installation.md
@@ -8,7 +8,7 @@ license: "This software is licensed under the Apache License version 2."
 To install the Apache Gravitino connector, you should first deploy the Trino environment, and then install the Gravitino connector plugin into Trino.
 Please refer to the [Deploying Trino documentation](https://trino.io/docs/current/installation/deployment.html) and do the following steps:
 
-1. [Download](https://github.com/datastrato/gravitino/releases) the Gravitino connector tarball and unpack it.
+1. [Download](https://github.com/apache/gravitino/releases) the Gravitino connector tarball and unpack it.
    The tarball contains a single top-level directory `gravitino-trino-connector-<version>`,
    which is called the connector directory.
 2. Copy the connector directory to the Trino's plugin directory.
@@ -52,7 +52,7 @@ Download the Gravitino connector tarball and unpack it.
 
 ```shell
 cd /tmp
-wget https://github.com/datastrato/gravitino/releases/gravitino-trino-connector-<version>.tar.gz
+wget https://github.com/apache/gravitino/releases/gravitino-trino-connector-<version>.tar.gz
 tar -zxvf gravitino-trino-connector-<version>.tar.gz
 ```
 
diff --git a/server-common/src/main/java/com/datastrato/gravitino/server/web/JettyServer.java b/server-common/src/main/java/com/datastrato/gravitino/server/web/JettyServer.java
index be7880bbb8e..9f95d5e92e1 100644
--- a/server-common/src/main/java/com/datastrato/gravitino/server/web/JettyServer.java
+++ b/server-common/src/main/java/com/datastrato/gravitino/server/web/JettyServer.java
@@ -413,7 +413,7 @@ private ThreadPool createThreadPool(int minThreads, int maxThreads, int threadPo
 
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
     // Use QueuedThreadPool not ExecutorThreadPool to work around the accidental test failures.
-    // see https://github.com/datastrato/gravitino/issues/546
+    // see https://github.com/apache/gravitino/issues/546
     QueuedThreadPool threadPool =
         new QueuedThreadPool(
             maxThreads, minThreads, 60000, new LinkedBlockingQueue<>(threadPoolWorkQueueSize)) {
diff --git a/web/src/app/rootLayout/Footer.js b/web/src/app/rootLayout/Footer.js
index ef53f9e8cc0..a6e308cd230 100644
--- a/web/src/app/rootLayout/Footer.js
+++ b/web/src/app/rootLayout/Footer.js
@@ -41,7 +41,7 @@ const Footer = props => {
             
               License
@@ -57,7 +57,7 @@ const Footer = props => {
             
               Support