diff --git a/.gitignore b/.gitignore index 7889cf7a923..eae3d3c952c 100644 --- a/.gitignore +++ b/.gitignore @@ -53,3 +53,7 @@ include clients/client-python/.gitignore **/metastore_db **/spark-warehouse derby.log + +web/node_modules +web/dist +web/.next
diff --git a/api/src/main/java/org/apache/gravitino/credential/Credential.java b/api/src/main/java/org/apache/gravitino/credential/Credential.java new file mode 100644 index 00000000000..b2fdb1971e6 --- /dev/null +++ b/api/src/main/java/org/apache/gravitino/credential/Credential.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.credential; + +import com.google.common.collect.ImmutableMap; +import java.util.Map; + +/** Interface representing a credential with type, expiration time, and additional information. */ +public interface Credential { + /** Credential type in the credential. */ + String CREDENTIAL_TYPE = "credential-type"; + /** Credential expire time in ms since the epoch. */ + String EXPIRE_TIME_IN_MS = "expire-time-in-ms"; + + /** + * Returns the type of the credential. It should be the same as the credential type of the + * credential provider. + * + * @return the credential type as a String. + */ + String credentialType(); + + /** + * Returns the expiration time of the credential in milliseconds since the epoch; 0 means the + * credential never expires. + * + * @return the expiration time as a long. + */ + long expireTimeInMs(); + + /** + * Returns credential information. + * + * @return a map of credential information. + */ + Map<String, String> credentialInfo(); + + /** + * Converts the credential to properties to transfer the credential through the API. + * + * @return a map containing credential properties. + */ + default Map<String, String> toProperties() { + return new ImmutableMap.Builder<String, String>() + .putAll(credentialInfo()) + .put(CREDENTIAL_TYPE, credentialType()) + .put(EXPIRE_TIME_IN_MS, String.valueOf(expireTimeInMs())) + .build(); + } +}
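For context, an implementation only needs to supply the three accessors; the default toProperties() then flattens them for transport. A minimal sketch (the class name, credential type, and info key below are illustrative, not part of this PR):

    // Illustrative implementation of the Credential interface added above.
    public class StaticTokenCredential implements Credential {
      @Override
      public String credentialType() {
        return "static-token"; // hypothetical type name
      }

      @Override
      public long expireTimeInMs() {
        return 0L; // 0 means the credential never expires
      }

      @Override
      public Map<String, String> credentialInfo() {
        return ImmutableMap.of("token", "..."); // hypothetical info key
      }
    }

Calling toProperties() on such an instance yields credentialInfo() plus the reserved "credential-type" and "expire-time-in-ms" entries.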
diff --git a/api/src/main/java/org/apache/gravitino/exceptions/ForbiddenException.java b/api/src/main/java/org/apache/gravitino/exceptions/ForbiddenException.java new file mode 100644 index 00000000000..95ee76ad1a7 --- /dev/null +++ b/api/src/main/java/org/apache/gravitino/exceptions/ForbiddenException.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.exceptions; + +import com.google.errorprone.annotations.FormatMethod; +import com.google.errorprone.annotations.FormatString; + +/** Exception thrown when a user is forbidden to perform an action. */ +public class ForbiddenException extends GravitinoRuntimeException { + /** + * Constructs a new exception with the specified detail message. + * + * @param message the detail message. + * @param args the arguments to the message. + */ + @FormatMethod + public ForbiddenException(@FormatString String message, Object... args) { + super(message, args); + } + + /** + * Constructs a new exception with the specified detail message and cause. + * + * @param cause the cause. + * @param message the detail message. + * @param args the arguments to the message. + */ + @FormatMethod + public ForbiddenException(Throwable cause, @FormatString String message, Object... args) { + super(cause, message, args); + } +}
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java index 2769d2fbc8a..28f9228e893 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java @@ -54,7 +54,7 @@ import org.apache.gravitino.connector.AuthorizationPropertiesMeta; import org.apache.gravitino.integration.test.container.HiveContainer; import org.apache.gravitino.integration.test.container.RangerContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.meta.AuditInfo; import org.apache.gravitino.meta.RoleEntity; @@ -71,7 +71,7 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class RangerHiveE2EIT extends AbstractIT { +public class RangerHiveE2EIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(RangerHiveE2EIT.class); private static RangerAuthorizationPlugin rangerAuthPlugin; @@ -99,7 +99,7 @@ public class RangerHiveE2EIT extends AbstractIT { private static String RANGER_ADMIN_URL = null; @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { // Enable Gravitino Authorization mode Map<String, String> configs = Maps.newHashMap(); configs.put(Configs.ENABLE_AUTHORIZATION.getKey(), String.valueOf(true)); @@ -107,7 +107,7 @@ public static void startIntegrationTest() throws Exception { configs.put(Configs.AUTHENTICATORS.getKey(), AuthenticatorType.SIMPLE.name().toLowerCase()); configs.put("SimpleAuthUserName", TEST_USER_NAME); registerCustomConfigs(configs); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); RangerITEnv.setup(); RangerITEnv.startHiveRangerContainer(); @@ -180,7 +180,7 @@ private static void generateRangerSparkSecurityXML() throws IOException { } @AfterAll - public static void stop() throws IOException { + public void stop() throws IOException { + if (client != null) { Arrays.stream(catalog.asSchemas().listSchemas()) .filter(schema -> !schema.equals("default")) @@ -204,7 +204,7 @@ public static void stop() throws IOException { LOG.error("Failed to close CloseableGroup", e); } - AbstractIT.client = null; + client = null; } @Test @@ -267,7 +267,7 @@ void testAllowUseSchemaPrivilege() throws InterruptedException { 1, rows2.stream().filter(row -> row.getString(0).equals(schemaName)).count()); } - private static void createMetalake() { + private void createMetalake() { GravitinoMetalake[] gravitinoMetalakes = client.listMetalakes(); Assertions.assertEquals(0, gravitinoMetalakes.length);
diff --git a/build.gradle.kts b/build.gradle.kts index a09391d39a6..23528db0c37 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -745,7 +745,7 @@ tasks { if (!it.name.startsWith("catalog") && !it.name.startsWith("authorization") && !it.name.startsWith("client") && !it.name.startsWith("filesystem") && !it.name.startsWith("spark") && !it.name.startsWith("iceberg") && it.name != "trino-connector" && - it.name != "integration-test" && it.name != "hive-metastore-common" && !it.name.startsWith("flink") + it.name != "integration-test" && it.name != "hive-metastore-common" && !it.name.startsWith("flink") && it.name != "gcp-bundle" ) { from(it.configurations.runtimeClasspath) into("distribution/package/libs") @@ -765,7 +765,7 @@ tasks { !it.name.startsWith("integration-test") && !it.name.startsWith("flink") && !it.name.startsWith("trino-connector") && - it.name != "hive-metastore-common" + it.name != "hive-metastore-common" && it.name != "gcp-bundle" ) { dependsOn("${it.name}:build") from("${it.name}/build/libs") @@ -781,6 +781,7 @@ tasks { ":catalogs:catalog-hive:copyLibAndConfig", ":catalogs:catalog-lakehouse-iceberg:copyLibAndConfig", ":catalogs:catalog-lakehouse-paimon:copyLibAndConfig", + ":catalogs:catalog-lakehouse-hudi:copyLibAndConfig", ":catalogs:catalog-jdbc-doris:copyLibAndConfig", ":catalogs:catalog-jdbc-mysql:copyLibAndConfig", ":catalogs:catalog-jdbc-postgresql:copyLibAndConfig",
diff --git a/bundles/build.gradle.kts b/bundles/build.gradle.kts new file mode 100644 index 00000000000..043fbfec673 --- /dev/null +++ b/bundles/build.gradle.kts @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +tasks.all { + enabled = false +} \ No newline at end of file diff --git a/bundles/gcp-bundle/build.gradle.kts b/bundles/gcp-bundle/build.gradle.kts new file mode 100644 index 00000000000..9433a600429 --- /dev/null +++ b/bundles/gcp-bundle/build.gradle.kts @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar + +plugins { + `maven-publish` + id("java") + alias(libs.plugins.shadow) +} + +dependencies { + compileOnly(project(":catalogs:catalog-hadoop")) + compileOnly(libs.hadoop3.common) + implementation(libs.hadoop3.gcs) +} + +tasks.withType(ShadowJar::class.java) { + isZip64 = true + configurations = listOf(project.configurations.runtimeClasspath.get()) + archiveClassifier.set("") +} + +tasks.jar { + dependsOn(tasks.named("shadowJar")) + archiveClassifier.set("empty") +} + +tasks.compileJava { + dependsOn(":catalogs:catalog-hadoop:runtimeJars") +} diff --git a/bundles/gcp-bundle/src/main/java/org/apache/gravitino/gcs/fs/GCSFileSystemProvider.java b/bundles/gcp-bundle/src/main/java/org/apache/gravitino/gcs/fs/GCSFileSystemProvider.java new file mode 100644 index 00000000000..919baa03b19 --- /dev/null +++ b/bundles/gcp-bundle/src/main/java/org/apache/gravitino/gcs/fs/GCSFileSystemProvider.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.gravitino.gcs.fs; + +import com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystem; +import java.io.IOException; +import java.util.Map; +import org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +public class GCSFileSystemProvider implements FileSystemProvider { + @Override + public FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException { + Configuration configuration = new Configuration(); + config.forEach( + (k, v) -> { + configuration.set(k.replace("gravitino.bypass.", ""), v); + }); + + return GoogleHadoopFileSystem.newInstance(path.toUri(), configuration); + } + + @Override + public String scheme() { + return "gs"; + } + + @Override + public String name() { + return "gcs"; + } +}
diff --git a/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider b/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider new file mode 100644 index 00000000000..8a65be70fd5 --- /dev/null +++ b/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +org.apache.gravitino.gcs.fs.GCSFileSystemProvider \ No newline at end of file
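Once the provider above is registered through META-INF/services, a fileset catalog opts into it via catalog properties. A sketch mirroring the HadoopGCSCatalogIT added later in this PR (the bucket name and keyfile path are placeholders):

    // Sketch: create a Hadoop fileset catalog that resolves gs:// paths via the gcs provider.
    Map<String, String> props = ImmutableMap.of(
        "filesystem-providers", "gcs", // loads GCSFileSystemProvider by its name()
        "gravitino.bypass.fs.gs.auth.service.account.enable", "true",
        "gravitino.bypass.fs.gs.auth.service.account.json.keyfile", "/path/to/key.json"); // placeholder
    metalake.createCatalog("gcs_catalog", Catalog.Type.FILESET, "hadoop", "comment", props);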
diff --git a/catalogs/catalog-hadoop/build.gradle.kts b/catalogs/catalog-hadoop/build.gradle.kts index ba60a161d8f..9ff3cc0e31c 100644 --- a/catalogs/catalog-hadoop/build.gradle.kts +++ b/catalogs/catalog-hadoop/build.gradle.kts @@ -36,6 +36,8 @@ dependencies { exclude(group = "*") } + compileOnly(libs.guava) + implementation(libs.hadoop3.common) { exclude("com.sun.jersey") exclude("javax.servlet", "servlet-api") @@ -71,6 +73,7 @@ dependencies { testImplementation(project(":integration-test-common", "testArtifacts")) testImplementation(project(":server")) testImplementation(project(":server-common")) + testImplementation(project(":bundles:gcp-bundle")) testImplementation(libs.minikdc) testImplementation(libs.hadoop3.minicluster) @@ -84,6 +87,7 @@ dependencies { testImplementation(libs.junit.jupiter.params) testImplementation(libs.testcontainers) testImplementation(libs.testcontainers.mysql) + testImplementation(libs.hadoop3.gcs) testRuntimeOnly(libs.junit.jupiter.engine) }
diff --git a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopCatalogOperations.java b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopCatalogOperations.java index da4d0e1a18e..8515ea7d20f 100644 --- a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopCatalogOperations.java +++ b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopCatalogOperations.java @@ -30,7 +30,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.apache.gravitino.Catalog; import org.apache.gravitino.Entity; @@ -44,6 +43,8 @@ import org.apache.gravitino.audit.CallerContext; import org.apache.gravitino.audit.FilesetAuditConstants; import org.apache.gravitino.audit.FilesetDataOperation; +import org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider; +import org.apache.gravitino.catalog.hadoop.fs.FileSystemUtils; import org.apache.gravitino.connector.CatalogInfo; import org.apache.gravitino.connector.CatalogOperations; import org.apache.gravitino.connector.HasPropertyMetadata; @@ -71,11 +72,9 @@ import org.slf4j.LoggerFactory; public class HadoopCatalogOperations implements CatalogOperations, SupportsSchemas, FilesetCatalog { - private static final String SCHEMA_DOES_NOT_EXIST_MSG = "Schema %s does not exist"; private static final String FILESET_DOES_NOT_EXIST_MSG = "Fileset %s does not exist"; private static final String SLASH = "/"; - private static final Logger LOG = LoggerFactory.getLogger(HadoopCatalogOperations.class); private final EntityStore store; @@ -90,6 +89,10 @@ public class HadoopCatalogOperations implements CatalogOperations, SupportsSchem private CatalogInfo catalogInfo; + private final Map<String, FileSystemProvider> fileSystemProvidersMap = Maps.newHashMap(); + + private FileSystemProvider defaultFileSystemProvider; + HadoopCatalogOperations(EntityStore store) { this.store = store; } @@ -107,7 +110,9 @@ public CatalogInfo getCatalogInfo() { } public Configuration getHadoopConf() { - return hadoopConf; + Configuration configuration = new Configuration(); + conf.forEach((k, v) -> configuration.set(k.replace(CATALOG_BYPASS_PREFIX, ""), v)); + return configuration; } public Map<String, String> getConf() { @@ -119,26 +124,31 @@ public void initialize( Map<String, String> config, CatalogInfo info, HasPropertyMetadata propertiesMetadata) throws RuntimeException { this.propertiesMetadata = propertiesMetadata; - // Initialize Hadoop Configuration. - this.conf = config; - this.hadoopConf = new Configuration(); this.catalogInfo = info; - Map<String, String> bypassConfigs = - config.entrySet().stream() - .filter(e -> e.getKey().startsWith(CATALOG_BYPASS_PREFIX)) - .collect( - Collectors.toMap( - e -> e.getKey().substring(CATALOG_BYPASS_PREFIX.length()), - Map.Entry::getValue)); - bypassConfigs.forEach(hadoopConf::set); + + this.conf = config; + + String fileSystemProviders = + (String) + propertiesMetadata + .catalogPropertiesMetadata() + .getOrDefault(config, HadoopCatalogPropertiesMetadata.FILESYSTEM_PROVIDERS); + this.fileSystemProvidersMap.putAll(FileSystemUtils.getFileSystemProviders(fileSystemProviders)); + + String defaultFileSystemProviderName = + (String) + propertiesMetadata + .catalogPropertiesMetadata() + .getOrDefault(config, HadoopCatalogPropertiesMetadata.DEFAULT_FS_PROVIDER); + this.defaultFileSystemProvider = + FileSystemUtils.getFileSystemProviderByName( + fileSystemProvidersMap, defaultFileSystemProviderName); String catalogLocation = (String) propertiesMetadata .catalogPropertiesMetadata() .getOrDefault(config, HadoopCatalogPropertiesMetadata.LOCATION); - conf.forEach(hadoopConf::set); - this.catalogStorageLocation = StringUtils.isNotBlank(catalogLocation) ? Optional.of(catalogLocation).map(Path::new) @@ -235,8 +245,9 @@ public Fileset createFileset( try { // formalize the path to avoid path without scheme, uri, authority, etc. - filesetPath = formalizePath(filesetPath, hadoopConf); - FileSystem fs = filesetPath.getFileSystem(hadoopConf); + filesetPath = formalizePath(filesetPath, conf); + + FileSystem fs = getFileSystem(filesetPath, conf); if (!fs.exists(filesetPath)) { if (!fs.mkdirs(filesetPath)) { throw new RuntimeException( @@ -339,7 +350,7 @@ public boolean dropFileset(NameIdentifier ident) { // For managed fileset, we should delete the related files. if (filesetEntity.filesetType() == Fileset.Type.MANAGED) { - FileSystem fs = filesetPath.getFileSystem(hadoopConf); + FileSystem fs = getFileSystem(filesetPath, conf); if (fs.exists(filesetPath)) { if (!fs.delete(filesetPath, true)) { LOG.warn("Failed to delete fileset {} location {}", ident, filesetPath); @@ -459,7 +470,7 @@ public Schema createSchema(NameIdentifier ident, String comment, Map<String, String> properties) { } @VisibleForTesting - static Path formalizePath(Path path, Configuration configuration) throws IOException { - FileSystem defaultFs = FileSystem.get(configuration); + Path formalizePath(Path path, Map<String, String> configuration) throws IOException { + FileSystem defaultFs = getFileSystem(path, configuration); return path.makeQualified(defaultFs.getUri(), defaultFs.getWorkingDirectory()); } @@ -731,7 +741,7 @@ private boolean hasCallerContext() { private boolean checkSingleFile(Fileset fileset) { try { Path locationPath = new Path(fileset.storageLocation()); - return locationPath.getFileSystem(hadoopConf).getFileStatus(locationPath).isFile(); + return getFileSystem(locationPath, conf).getFileStatus(locationPath).isFile(); } catch (FileNotFoundException e) { // We should always return false here, same with the logic in `FileSystem.isFile(Path f)`. return false; @@ -742,4 +752,25 @@ private boolean checkSingleFile(Fileset fileset) { fileset.name()); } } + + FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException { + if (path == null) { + throw new IllegalArgumentException("Path should not be null"); + } + + String scheme = + path.toUri().getScheme() != null + ? path.toUri().getScheme() + : defaultFileSystemProvider.scheme(); + + FileSystemProvider provider = fileSystemProvidersMap.get(scheme); + if (provider == null) { + throw new IllegalArgumentException( + String.format( + "Unsupported scheme: %s, path: %s, all supported schemes: %s and providers: %s", + scheme, path, fileSystemProvidersMap.keySet(), fileSystemProvidersMap.values())); + } + + return provider.getFileSystem(path, config); + } }
diff --git a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopCatalogPropertiesMetadata.java b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopCatalogPropertiesMetadata.java index 9a68e2d5522..397e13aa4af 100644 --- a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopCatalogPropertiesMetadata.java +++ b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/HadoopCatalogPropertiesMetadata.java @@ -18,10 +18,13 @@ */ package org.apache.gravitino.catalog.hadoop; +import static org.apache.gravitino.catalog.hadoop.authentication.kerberos.KerberosConfig.KERBEROS_PROPERTY_ENTRIES; + import com.google.common.collect.ImmutableMap; import java.util.Map; import org.apache.gravitino.catalog.hadoop.authentication.AuthenticationConfig; -import org.apache.gravitino.catalog.hadoop.authentication.kerberos.KerberosConfig; +import org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider; +import org.apache.gravitino.catalog.hadoop.fs.LocalFileSystemProvider; import org.apache.gravitino.connector.BaseCatalogPropertiesMetadata; import org.apache.gravitino.connector.PropertyEntry; @@ -34,6 +37,24 @@ public class HadoopCatalogPropertiesMetadata extends BaseCatalogPropertiesMetada // If not, users have to specify the storage location in the Schema or Fileset level. public static final String LOCATION = "location"; + /** + * The name of the {@link FileSystemProvider} to be added to the catalog. Besides built-in + * FileSystemProviders like LocalFileSystemProvider and HDFSFileSystemProvider, users can add their + * own FileSystemProvider by specifying the provider name here. The value can be found via {@link + * FileSystemProvider#name()}. + */ + public static final String FILESYSTEM_PROVIDERS = "filesystem-providers"; + + /** + * The name of the default file system provider, used to create the default file system. If not + * specified, the default file system provider will be {@link LocalFileSystemProvider#name()}: + * 'builtin-local'. + */ + public static final String DEFAULT_FS_PROVIDER = "default-filesystem-provider"; + + public static final String BUILTIN_LOCAL_FS_PROVIDER = "builtin-local"; + public static final String BUILTIN_HDFS_FS_PROVIDER = "builtin-hdfs"; + private static final Map<String, PropertyEntry<?>> HADOOP_CATALOG_PROPERTY_ENTRIES = ImmutableMap.<String, PropertyEntry<?>>builder() .put( LOCATION, PropertyEntry.stringOptionalPropertyEntry( LOCATION, @@ -44,8 +65,24 @@ public class HadoopCatalogPropertiesMetadata extends BaseCatalogPropertiesMetada false /* immutable */, null, false /* hidden */)) + .put( + FILESYSTEM_PROVIDERS, + PropertyEntry.stringOptionalPropertyEntry( + FILESYSTEM_PROVIDERS, + "The file system provider names, separated by comma", + false /* immutable */, + null, + false /* hidden */)) + .put( + DEFAULT_FS_PROVIDER, + PropertyEntry.stringOptionalPropertyEntry( + DEFAULT_FS_PROVIDER, + "Default file system provider name", + false /* immutable */, + BUILTIN_LOCAL_FS_PROVIDER, // please see LocalFileSystemProvider#name() + false /* hidden */)) // The following two are about authentication. - .putAll(KerberosConfig.KERBEROS_PROPERTY_ENTRIES) + .putAll(KERBEROS_PROPERTY_ENTRIES) .putAll(AuthenticationConfig.AUTHENTICATION_PROPERTY_ENTRIES) .build();
diff --git a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/FileSystemProvider.java b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/FileSystemProvider.java new file mode 100644 index 00000000000..5bee821e505 --- /dev/null +++ b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/FileSystemProvider.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.catalog.hadoop.fs; + +import java.io.IOException; +import java.util.Map; +import javax.annotation.Nonnull; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +/** + * FileSystemProvider is an interface for providing FileSystem instances. It is used by the + * HadoopCatalog to create FileSystem instances for accessing Hadoop compatible file systems. + */ +public interface FileSystemProvider { + + /** + * Get the FileSystem instance according to the configuration map and file path. + * + * <p>Compared to the {@link FileSystem#get(Configuration)} method, this method allows the + * provider to create a FileSystem instance with a specific configuration and do further + * initialization if needed. + * + * <p>For example: 1. We can check the endpoint value validity for S3AFileSystem and then take + * further actions. 2. We can also change some default behavior of the FileSystem initialization + * process. 3. More... + * + * @param path The path to the file system. + * @param config The configuration for the FileSystem instance. + * @return The FileSystem instance. + * @throws IOException If the FileSystem instance cannot be created. + */ + FileSystem getFileSystem(@Nonnull Path path, @Nonnull Map<String, String> config) + throws IOException; + + /** + * Scheme of this FileSystem provider. The value is 'file' for LocalFileSystem, 'hdfs' for HDFS, + * etc. + * + * @return The scheme of this FileSystem provider. + */ + String scheme(); + + /** + * Name of this FileSystem provider. The value is 'builtin-local' for LocalFileSystem, + * 'builtin-hdfs' for HDFS, etc. + * + * @return The name of this FileSystem provider. + */ + String name(); }
diff --git a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/FileSystemUtils.java b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/FileSystemUtils.java new file mode 100644 index 00000000000..3a959ff3738 --- /dev/null +++ b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/FileSystemUtils.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.catalog.hadoop.fs; + +import static org.apache.gravitino.catalog.hadoop.HadoopCatalogPropertiesMetadata.BUILTIN_HDFS_FS_PROVIDER; +import static org.apache.gravitino.catalog.hadoop.HadoopCatalogPropertiesMetadata.BUILTIN_LOCAL_FS_PROVIDER; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.collect.Streams; +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; +import java.util.ServiceLoader; +import java.util.Set; +import java.util.stream.Collectors; + +public class FileSystemUtils { + + private FileSystemUtils() {} + + public static Map<String, FileSystemProvider> getFileSystemProviders(String fileSystemProviders) { + Map<String, FileSystemProvider> resultMap = Maps.newHashMap(); + ServiceLoader<FileSystemProvider> allFileSystemProviders = + ServiceLoader.load(FileSystemProvider.class); + + Set<String> providersInUses = + fileSystemProviders != null + ? Arrays.stream(fileSystemProviders.split(",")) + .map(f -> f.trim().toLowerCase(Locale.ROOT)) + .collect(Collectors.toSet()) + : Sets.newHashSet(); + + // Add built-in file system providers to the use list automatically. + providersInUses.add(BUILTIN_LOCAL_FS_PROVIDER.toLowerCase(Locale.ROOT)); + providersInUses.add(BUILTIN_HDFS_FS_PROVIDER.toLowerCase(Locale.ROOT)); + + // Only get the file system providers that are in the user list and check if the scheme is + // unique. + Streams.stream(allFileSystemProviders.iterator()) + .filter( + fileSystemProvider -> + providersInUses.contains(fileSystemProvider.name().toLowerCase(Locale.ROOT))) + .forEach( + fileSystemProvider -> { + if (resultMap.containsKey(fileSystemProvider.scheme())) { + throw new UnsupportedOperationException( + String.format( + "File system provider: '%s' with scheme '%s' already exists in the use provider list. " + + "Please make sure the file system provider scheme is unique.", + fileSystemProvider.getClass().getName(), fileSystemProvider.scheme())); + } + resultMap.put(fileSystemProvider.scheme(), fileSystemProvider); + }); + + // If not all file system providers in providersInUses were found, throw an exception. + Set<String> notFoundProviders = + Sets.difference( + providersInUses, + resultMap.values().stream() + .map(p -> p.name().toLowerCase(Locale.ROOT)) + .collect(Collectors.toSet())) + .immutableCopy(); + if (!notFoundProviders.isEmpty()) { + throw new UnsupportedOperationException( + String.format( + "File system providers %s not found in the classpath. Please make sure the file system " + + "provider is in the classpath.", + notFoundProviders)); + } + + return resultMap; + } + + public static FileSystemProvider getFileSystemProviderByName( + Map<String, FileSystemProvider> fileSystemProviders, String fileSystemProviderName) { + return fileSystemProviders.entrySet().stream() + .filter(entry -> entry.getValue().name().equals(fileSystemProviderName)) + .map(Map.Entry::getValue) + .findFirst() + .orElseThrow( + () -> + new UnsupportedOperationException( + String.format( + "File system provider with name '%s' not found in the file system provider list.", + fileSystemProviderName))); + } }
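A quick sketch of how these utilities behave, assuming the gcs provider from this PR is on the classpath:

    // Keyed by scheme: "file" and "hdfs" are always present, plus "gs" here.
    Map<String, FileSystemProvider> providers = FileSystemUtils.getFileSystemProviders("gcs");
    // Lookup by provider name; an unknown name throws UnsupportedOperationException.
    FileSystemProvider local = FileSystemUtils.getFileSystemProviderByName(providers, "builtin-local");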
diff --git a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/HDFSFileSystemProvider.java b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/HDFSFileSystemProvider.java new file mode 100644 index 00000000000..c7c2fd393f6 --- /dev/null +++ b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/HDFSFileSystemProvider.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.catalog.hadoop.fs; + +import static org.apache.gravitino.catalog.hadoop.HadoopCatalogPropertiesMetadata.BUILTIN_HDFS_FS_PROVIDER; +import static org.apache.gravitino.connector.BaseCatalog.CATALOG_BYPASS_PREFIX; + +import java.io.IOException; +import java.util.Map; +import javax.annotation.Nonnull; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +public class HDFSFileSystemProvider implements FileSystemProvider { + + @Override + public FileSystem getFileSystem(@Nonnull Path path, @Nonnull Map<String, String> config) + throws IOException { + Configuration configuration = new Configuration(); + config.forEach( + (k, v) -> { + configuration.set(k.replace(CATALOG_BYPASS_PREFIX, ""), v); + }); + return FileSystem.newInstance(path.toUri(), configuration); + } + + @Override + public String scheme() { + return "hdfs"; + } + + @Override + public String name() { + return BUILTIN_HDFS_FS_PROVIDER; + } +}
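Note the bypass prefix is stripped before keys reach Hadoop's Configuration. A small sketch (the namenode address is a placeholder):

    // "gravitino.bypass.fs.defaultFS" becomes "fs.defaultFS" inside the provider.
    Map<String, String> conf = ImmutableMap.of(
        "gravitino.bypass.fs.defaultFS", "hdfs://localhost:9000"); // placeholder
    FileSystem fs = new HDFSFileSystemProvider()
        .getFileSystem(new Path("hdfs://localhost:9000/tmp/catalog"), conf); // throws IOException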
diff --git a/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/LocalFileSystemProvider.java b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/LocalFileSystemProvider.java new file mode 100644 index 00000000000..e940e2bb6ba --- /dev/null +++ b/catalogs/catalog-hadoop/src/main/java/org/apache/gravitino/catalog/hadoop/fs/LocalFileSystemProvider.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.catalog.hadoop.fs; + +import static org.apache.gravitino.catalog.hadoop.HadoopCatalogPropertiesMetadata.BUILTIN_LOCAL_FS_PROVIDER; +import static org.apache.gravitino.connector.BaseCatalog.CATALOG_BYPASS_PREFIX; + +import java.io.IOException; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +public class LocalFileSystemProvider implements FileSystemProvider { + + @Override + public FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException { + Configuration configuration = new Configuration(); + config.forEach( + (k, v) -> { + configuration.set(k.replace(CATALOG_BYPASS_PREFIX, ""), v); + }); + + return FileSystem.newInstance(path.toUri(), configuration); + } + + @Override + public String scheme() { + return "file"; + } + + @Override + public String name() { + return BUILTIN_LOCAL_FS_PROVIDER; + } +}
diff --git a/catalogs/catalog-hadoop/src/main/resources/META-INF/services/org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider b/catalogs/catalog-hadoop/src/main/resources/META-INF/services/org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider new file mode 100644 index 00000000000..93a84744aa5 --- /dev/null +++ b/catalogs/catalog-hadoop/src/main/resources/META-INF/services/org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+# + +org.apache.gravitino.catalog.hadoop.fs.HDFSFileSystemProvider +org.apache.gravitino.catalog.hadoop.fs.LocalFileSystemProvider \ No newline at end of file diff --git a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/TestHadoopCatalogOperations.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/TestHadoopCatalogOperations.java index d3206972680..2b89180a8d1 100644 --- a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/TestHadoopCatalogOperations.java +++ b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/TestHadoopCatalogOperations.java @@ -34,7 +34,6 @@ import static org.apache.gravitino.catalog.hadoop.HadoopCatalog.CATALOG_PROPERTIES_META; import static org.apache.gravitino.catalog.hadoop.HadoopCatalog.FILESET_PROPERTIES_META; import static org.apache.gravitino.catalog.hadoop.HadoopCatalog.SCHEMA_PROPERTIES_META; -import static org.apache.gravitino.connector.BaseCatalog.CATALOG_BYPASS_PREFIX; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.when; @@ -50,10 +49,12 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.gravitino.Catalog; import org.apache.gravitino.Config; import org.apache.gravitino.EntityStore; import org.apache.gravitino.EntityStoreFactory; +import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; import org.apache.gravitino.Schema; @@ -65,6 +66,7 @@ import org.apache.gravitino.connector.CatalogInfo; import org.apache.gravitino.connector.HasPropertyMetadata; import org.apache.gravitino.connector.PropertiesMetadata; +import org.apache.gravitino.connector.PropertyEntry; import org.apache.gravitino.exceptions.GravitinoRuntimeException; import org.apache.gravitino.exceptions.NoSuchFilesetException; import org.apache.gravitino.exceptions.NoSuchSchemaException; @@ -74,6 +76,7 @@ import org.apache.gravitino.file.FilesetChange; import org.apache.gravitino.storage.IdGenerator; import org.apache.gravitino.storage.RandomIdGenerator; +import org.apache.gravitino.storage.relational.RelationalEntityStore; import org.apache.gravitino.storage.relational.service.CatalogMetaService; import org.apache.gravitino.storage.relational.service.MetalakeMetaService; import org.apache.gravitino.utils.NameIdentifierUtil; @@ -230,18 +233,10 @@ public void testHadoopCatalogConfiguration() { CatalogInfo catalogInfo = randomCatalogInfo(); ops.initialize(emptyProps, catalogInfo, HADOOP_PROPERTIES_METADATA); - Configuration conf = ops.hadoopConf; + Configuration conf = ops.getHadoopConf(); String value = conf.get("fs.defaultFS"); Assertions.assertEquals("file:///", value); - emptyProps.put(CATALOG_BYPASS_PREFIX + "fs.defaultFS", "hdfs://localhost:9000"); - ops.initialize(emptyProps, catalogInfo, HADOOP_PROPERTIES_METADATA); - Configuration conf1 = ops.hadoopConf; - String value1 = conf1.get("fs.defaultFS"); - Assertions.assertEquals("hdfs://localhost:9000", value1); - - Assertions.assertFalse(ops.catalogStorageLocation.isPresent()); - emptyProps.put(HadoopCatalogPropertiesMetadata.LOCATION, "file:///tmp/catalog"); ops.initialize(emptyProps, catalogInfo, HADOOP_PROPERTIES_METADATA); Assertions.assertTrue(ops.catalogStorageLocation.isPresent()); @@ -677,33 +672,68 @@ public void testAlterFilesetProperties() throws IOException { } @Test - public void testFormalizePath() throws 
IOException { + public void testFormalizePath() throws IOException, IllegalAccessException { String[] paths = - new String[] { - "tmp/catalog", - "/tmp/catalog", - "file:/tmp/catalog", - "file:///tmp/catalog", - "hdfs://localhost:9000/tmp/catalog", - "s3://bucket/tmp/catalog", - "gs://bucket/tmp/catalog" - }; + new String[] {"tmp/catalog", "/tmp/catalog", "file:/tmp/catalog", "file:///tmp/catalog"}; String[] expected = new String[] { "file:" + Paths.get("").toAbsolutePath() + "/tmp/catalog", "file:/tmp/catalog", "file:/tmp/catalog", - "file:/tmp/catalog", - "hdfs://localhost:9000/tmp/catalog", - "s3://bucket/tmp/catalog", - "gs://bucket/tmp/catalog" + "file:/tmp/catalog" }; + + HasPropertyMetadata hasPropertyMetadata = + new HasPropertyMetadata() { + @Override + public PropertiesMetadata tablePropertiesMetadata() throws UnsupportedOperationException { + return null; + } + + @Override + public PropertiesMetadata catalogPropertiesMetadata() + throws UnsupportedOperationException { + return new PropertiesMetadata() { + @Override + public Map<String, PropertyEntry<?>> propertyEntries() { + return new HadoopCatalogPropertiesMetadata().propertyEntries(); + } + }; + } + + @Override + public PropertiesMetadata schemaPropertiesMetadata() + throws UnsupportedOperationException { + return null; + } + + @Override + public PropertiesMetadata filesetPropertiesMetadata() + throws UnsupportedOperationException { + return null; + } + + @Override + public PropertiesMetadata topicPropertiesMetadata() throws UnsupportedOperationException { + return null; + } }; - for (int i = 0; i < paths.length; i++) { - Path actual = HadoopCatalogOperations.formalizePath(new Path(paths[i]), new Configuration()); - Assertions.assertEquals(expected[i], actual.toString()); + try { + FieldUtils.writeField( + GravitinoEnv.getInstance(), "entityStore", new RelationalEntityStore(), true); + try (HadoopCatalogOperations hadoopCatalogOperations = new HadoopCatalogOperations()) { + Map<String, String> map = ImmutableMap.of("default-filesystem", "file:///"); + hadoopCatalogOperations.initialize(map, null, hasPropertyMetadata); + for (int i = 0; i < paths.length; i++) { + Path actual = hadoopCatalogOperations.formalizePath(new Path(paths[i]), map); + Assertions.assertEquals(expected[i], actual.toString()); + } + } + } finally { + FieldUtils.writeField(GravitinoEnv.getInstance(), "entityStore", null, true); } } @@ -877,8 +907,11 @@ public void testGetFileLocation() throws IOException { try (HadoopCatalogOperations mockOps = Mockito.mock(HadoopCatalogOperations.class)) { mockOps.hadoopConf = new Configuration(); when(mockOps.loadFileset(filesetIdent)).thenReturn(mockFileset); + when(mockOps.getConf()).thenReturn(Maps.newHashMap()); String subPath = "/test/test.parquet"; when(mockOps.getFileLocation(filesetIdent, subPath)).thenCallRealMethod(); + when(mockOps.getFileSystem(Mockito.any(), Mockito.any())) + .thenReturn(FileSystem.getLocal(new Configuration())); String fileLocation = mockOps.getFileLocation(filesetIdent, subPath); Assertions.assertEquals( String.format("%s%s", mockFileset.storageLocation(), subPath.substring(1)), fileLocation);
diff --git a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopCatalogIT.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopCatalogIT.java index 20f9a1eeab8..76d17ff0146 100644 --- a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopCatalogIT.java +++
b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopCatalogIT.java @@ -43,7 +43,7 @@ import org.apache.gravitino.file.FilesetChange; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -57,28 +57,26 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class HadoopCatalogIT extends AbstractIT { +public class HadoopCatalogIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(HadoopCatalogIT.class); - private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); + protected static final ContainerSuite containerSuite = ContainerSuite.getInstance(); - public static final String metalakeName = - GravitinoITUtils.genRandomName("CatalogFilesetIT_metalake"); - public static final String catalogName = - GravitinoITUtils.genRandomName("CatalogFilesetIT_catalog"); + public String metalakeName = GravitinoITUtils.genRandomName("CatalogFilesetIT_metalake"); + public String catalogName = GravitinoITUtils.genRandomName("CatalogFilesetIT_catalog"); public static final String SCHEMA_PREFIX = "CatalogFilesetIT_schema"; - public static final String schemaName = GravitinoITUtils.genRandomName(SCHEMA_PREFIX); - private static final String provider = "hadoop"; - private static GravitinoMetalake metalake; - private static Catalog catalog; - private static FileSystem hdfs; - private static String defaultBaseLocation; + public String schemaName = GravitinoITUtils.genRandomName(SCHEMA_PREFIX); + protected static final String provider = "hadoop"; + protected static GravitinoMetalake metalake; + protected static Catalog catalog; + protected static FileSystem fileSystem; + protected static String defaultBaseLocation; @BeforeAll - public static void setup() throws IOException { + public void setup() throws IOException { containerSuite.startHiveContainer(); Configuration conf = new Configuration(); conf.set("fs.defaultFS", defaultBaseLocation()); - hdfs = FileSystem.get(conf); + fileSystem = FileSystem.get(conf); createMetalake(); createCatalog(); @@ -86,13 +84,13 @@ public static void setup() throws IOException { } @AfterAll - public static void stop() throws IOException { + public void stop() throws IOException { Catalog catalog = metalake.loadCatalog(catalogName); catalog.asSchemas().dropSchema(schemaName, true); metalake.dropCatalog(catalogName); client.dropMetalake(metalakeName); - if (hdfs != null) { - hdfs.close(); + if (fileSystem != null) { + fileSystem.close(); } try { @@ -102,7 +100,7 @@ public static void stop() throws IOException { } } - private static void createMetalake() { + protected void createMetalake() { GravitinoMetalake[] gravitinoMetalakes = client.listMetalakes(); Assertions.assertEquals(0, gravitinoMetalakes.length); @@ -114,14 +112,14 @@ private static void createMetalake() { metalake = loadMetalake; } - private static void createCatalog() { + protected void createCatalog() { metalake.createCatalog( catalogName, Catalog.Type.FILESET, provider, "comment", ImmutableMap.of()); catalog = metalake.loadCatalog(catalogName); } - private static void createSchema() { + protected void createSchema() { Map properties = Maps.newHashMap(); 
properties.put("key1", "val1"); properties.put("key2", "val2"); @@ -137,7 +135,7 @@ private static void createSchema() { Assertions.assertNotNull(loadSchema.properties().get("location")); } - private static void dropSchema() { + private void dropSchema() { catalog.asSchemas().dropSchema(schemaName, true); Assertions.assertFalse(catalog.asSchemas().schemaExists(schemaName)); } @@ -171,7 +169,7 @@ public void testCreateFileset() throws IOException { String filesetName = "test_create_fileset"; String storageLocation = storageLocation(filesetName); Assertions.assertFalse( - hdfs.exists(new Path(storageLocation)), "storage location should not exists"); + fileSystem.exists(new Path(storageLocation)), "storage location should not exists"); Fileset fileset = createFileset( filesetName, @@ -242,7 +240,7 @@ public void testCreateFilesetWithChinese() throws IOException { String filesetName = "test_create_fileset_with_chinese"; String storageLocation = storageLocation(filesetName) + "/中文目录test"; Assertions.assertFalse( - hdfs.exists(new Path(storageLocation)), "storage location should not exists"); + fileSystem.exists(new Path(storageLocation)), "storage location should not exists"); Fileset fileset = createFileset( filesetName, @@ -285,7 +283,7 @@ public void testExternalFileset() throws IOException { Assertions.assertEquals(1, fileset.properties().size()); Assertions.assertEquals("v1", fileset.properties().get("k1")); Assertions.assertTrue( - hdfs.exists(new Path(storageLocation)), "storage location should be created"); + fileSystem.exists(new Path(storageLocation)), "storage location should be created"); // create fileset with storage location that not exist String filesetName2 = "test_external_fileset_no_exist"; @@ -349,7 +347,7 @@ public void testDropManagedFileset() throws IOException { String storageLocation = storageLocation(filesetName); Assertions.assertFalse( - hdfs.exists(new Path(storageLocation)), "storage location should not exists"); + fileSystem.exists(new Path(storageLocation)), "storage location should not exists"); createFileset( filesetName, "comment", Fileset.Type.MANAGED, storageLocation, ImmutableMap.of("k1", "v1")); @@ -365,7 +363,7 @@ public void testDropManagedFileset() throws IOException { catalog.asFilesetCatalog().filesetExists(NameIdentifier.of(schemaName, filesetName)), "fileset should not be exists"); Assertions.assertFalse( - hdfs.exists(new Path(storageLocation)), "storage location should be dropped"); + fileSystem.exists(new Path(storageLocation)), "storage location should be dropped"); } @Test @@ -392,7 +390,7 @@ public void testDropExternalFileset() throws IOException { catalog.asFilesetCatalog().filesetExists(NameIdentifier.of(schemaName, filesetName)), "fileset should not be exists"); Assertions.assertTrue( - hdfs.exists(new Path(storageLocation)), "storage location should not be dropped"); + fileSystem.exists(new Path(storageLocation)), "storage location should not be dropped"); } @Test @@ -688,7 +686,7 @@ public void testGetFileLocationWithInvalidAuditHeaders() { } } - private static String generateLocation(String filesetName) { + protected String generateLocation(String filesetName) { return String.format( "hdfs://%s:%d/user/hadoop/%s/%s/%s", containerSuite.getHiveContainer().getContainerIpAddress(), @@ -707,7 +705,7 @@ private Fileset createFileset( if (storageLocation != null) { Path location = new Path(storageLocation); try { - hdfs.deleteOnExit(location); + fileSystem.deleteOnExit(location); } catch (IOException e) { LOG.warn("Failed to delete location: 
{}", location, e); } @@ -724,10 +722,11 @@ private void assertFilesetExists(String filesetName) throws IOException { catalog.asFilesetCatalog().filesetExists(NameIdentifier.of(schemaName, filesetName)), "fileset should be exists"); Assertions.assertTrue( - hdfs.exists(new Path(storageLocation(filesetName))), "storage location should be exists"); + fileSystem.exists(new Path(storageLocation(filesetName))), + "storage location should be exists"); } - private static String defaultBaseLocation() { + protected String defaultBaseLocation() { if (defaultBaseLocation == null) { defaultBaseLocation = String.format( @@ -739,7 +738,7 @@ private static String defaultBaseLocation() { return defaultBaseLocation; } - private static String storageLocation(String filesetName) { + private String storageLocation(String filesetName) { return defaultBaseLocation() + "/" + filesetName; } } diff --git a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopGCSCatalogIT.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopGCSCatalogIT.java new file mode 100644 index 00000000000..74ae2a77cdb --- /dev/null +++ b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopGCSCatalogIT.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.catalog.hadoop.integration.test; + +import static org.apache.gravitino.catalog.hadoop.HadoopCatalogPropertiesMetadata.FILESYSTEM_PROVIDERS; + +import com.google.common.collect.Maps; +import java.io.IOException; +import java.net.URI; +import java.util.Map; +import org.apache.gravitino.Catalog; +import org.apache.gravitino.integration.test.util.GravitinoITUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; + +@Tag("gravitino-docker-test") +@Disabled( + "Disabled due to we don't have a real GCP account to test. 
If you have a GCP account," + "please change the configuration (YOUR_KEY_FILE, YOUR_BUCKET) and enable this test.") public class HadoopGCSCatalogIT extends HadoopCatalogIT { + + public static final String BUCKET_NAME = "YOUR_BUCKET"; + public static final String SERVICE_ACCOUNT_FILE = "YOUR_KEY_FILE"; + + @BeforeAll + public void setup() throws IOException { + metalakeName = GravitinoITUtils.genRandomName("CatalogFilesetIT_metalake"); + catalogName = GravitinoITUtils.genRandomName("CatalogFilesetIT_catalog"); + schemaName = GravitinoITUtils.genRandomName(SCHEMA_PREFIX); + Configuration conf = new Configuration(); + + conf.set("fs.gs.auth.service.account.enable", "true"); + conf.set("fs.gs.auth.service.account.json.keyfile", SERVICE_ACCOUNT_FILE); + fileSystem = FileSystem.get(URI.create(String.format("gs://%s", BUCKET_NAME)), conf); + + createMetalake(); + createCatalog(); + createSchema(); + } + + protected String defaultBaseLocation() { + if (defaultBaseLocation == null) { + try { + Path bucket = + new Path( + String.format( + "gs://%s/%s", BUCKET_NAME, GravitinoITUtils.genRandomName("CatalogFilesetIT"))); + if (!fileSystem.exists(bucket)) { + fileSystem.mkdirs(bucket); + } + + defaultBaseLocation = bucket.toString(); + } catch (IOException e) { + throw new RuntimeException("Failed to create default base location", e); + } + } + + return defaultBaseLocation; + } + + protected void createCatalog() { + Map<String, String> map = Maps.newHashMap(); + map.put("gravitino.bypass.fs.gs.auth.service.account.enable", "true"); + map.put("gravitino.bypass.fs.gs.auth.service.account.json.keyfile", SERVICE_ACCOUNT_FILE); + map.put(FILESYSTEM_PROVIDERS, "gcs"); + + metalake.createCatalog(catalogName, Catalog.Type.FILESET, provider, "comment", map); + + catalog = metalake.loadCatalog(catalogName); + } + + protected String generateLocation(String filesetName) { + return String.format("%s/%s", defaultBaseLocation, filesetName); + } +}
diff --git a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java index 7a56f8503a3..0a23ea7d326 100644 --- a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java +++ b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java @@ -46,7 +46,7 @@ import org.apache.gravitino.file.Fileset; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterAll; @@ -60,7 +60,7 @@ import sun.security.krb5.KrbException; @Tag("gravitino-docker-test") -public class HadoopUserAuthenticationIT extends AbstractIT { +public class HadoopUserAuthenticationIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(HadoopUserAuthenticationIT.class); private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); @@ -104,7 +104,7 @@ public class HadoopUserAuthenticationIT extends AbstractIT { private static final String
TABLE_NAME = "test_table"; @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { containerSuite.startKerberosHiveContainer(); kerberosHiveContainer = containerSuite.getKerberosHiveContainer(); @@ -122,7 +122,7 @@ public static void startIntegrationTest() throws Exception { addKerberosConfig(); // Start Gravitino server - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); } @AfterAll @@ -222,14 +222,12 @@ private static void createKeyTableForSchemaAndFileset() throws IOException { .copyFileFromContainer(HADOOP_FILESET_KEYTAB, TMP_DIR + HADOOP_FILESET_KEYTAB); } - private static void addKerberosConfig() { - AbstractIT.customConfigs.put(Configs.AUTHENTICATORS.getKey(), "kerberos"); - AbstractIT.customConfigs.put( - "gravitino.authenticator.kerberos.principal", GRAVITINO_SERVER_PRINCIPAL); - AbstractIT.customConfigs.put( - "gravitino.authenticator.kerberos.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); - AbstractIT.customConfigs.put(SDK_KERBEROS_KEYTAB_KEY, TMP_DIR + GRAVITINO_CLIENT_KEYTAB); - AbstractIT.customConfigs.put(SDK_KERBEROS_PRINCIPAL_KEY, GRAVITINO_CLIENT_PRINCIPAL); + private void addKerberosConfig() { + customConfigs.put(Configs.AUTHENTICATORS.getKey(), "kerberos"); + customConfigs.put("gravitino.authenticator.kerberos.principal", GRAVITINO_SERVER_PRINCIPAL); + customConfigs.put("gravitino.authenticator.kerberos.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); + customConfigs.put(SDK_KERBEROS_KEYTAB_KEY, TMP_DIR + GRAVITINO_CLIENT_KEYTAB); + customConfigs.put(SDK_KERBEROS_PRINCIPAL_KEY, GRAVITINO_CLIENT_PRINCIPAL); } @Test diff --git a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserImpersonationIT.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserImpersonationIT.java index 248b8d54ffb..9515b45b5dd 100644 --- a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserImpersonationIT.java +++ b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserImpersonationIT.java @@ -45,7 +45,7 @@ import org.apache.gravitino.exceptions.FilesetAlreadyExistsException; import org.apache.gravitino.exceptions.IllegalNameIdentifierException; import org.apache.gravitino.file.Fileset; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.hadoop.conf.Configuration; @@ -67,7 +67,7 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class HadoopUserImpersonationIT extends AbstractIT { +public class HadoopUserImpersonationIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(HadoopCatalogIT.class); public static final String metalakeName = @@ -111,7 +111,7 @@ private static void refreshKerberosConfig() { } @BeforeAll - public static void setup() throws Exception { + public void setup() throws Exception { if (!isEmbedded()) { return; } @@ -254,7 +254,7 @@ void testListFileSystem() throws Exception { }); } - private static void createMetalake() { + private void createMetalake() { GravitinoMetalake[] gravitinoMetalakes = client.listMetalakes(); Assertions.assertEquals(0, gravitinoMetalakes.length); diff --git a/catalogs/catalog-hive/build.gradle.kts 
b/catalogs/catalog-hive/build.gradle.kts index 2afb48f9a2d..aca8959df13 100644 --- a/catalogs/catalog-hive/build.gradle.kts +++ b/catalogs/catalog-hive/build.gradle.kts @@ -58,7 +58,7 @@ dependencies { exclude("com.google.code.findbugs", "sr305") exclude("com.tdunning", "json") exclude("com.zaxxer", "HikariCP") - exclude("io.dropwizard.metricss") + exclude("io.dropwizard.metrics") exclude("javax.transaction", "transaction-api") exclude("org.apache.ant") exclude("org.apache.avro") diff --git a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java index 081aad480da..1233870b0b1 100644 --- a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java +++ b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java @@ -70,7 +70,7 @@ import org.apache.gravitino.hive.HiveClientPool; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; @@ -108,13 +108,11 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -public class CatalogHiveIT extends AbstractIT { +public class CatalogHiveIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(CatalogHiveIT.class); public static final String metalakeName = GravitinoITUtils.genRandomName("CatalogHiveIT_metalake"); @@ -253,7 +251,7 @@ public void stop() throws IOException { LOG.error("Failed to close CloseableGroup", e); } - AbstractIT.client = null; + client = null; } @AfterEach diff --git a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/HiveUserAuthenticationIT.java b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/HiveUserAuthenticationIT.java index a4d982e30b8..d6330b0d819 100644 --- a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/HiveUserAuthenticationIT.java +++ b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/HiveUserAuthenticationIT.java @@ -43,7 +43,7 @@ import org.apache.gravitino.client.KerberosTokenProvider; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.TableChange; @@ -63,7 +63,7 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class HiveUserAuthenticationIT extends AbstractIT { +public class HiveUserAuthenticationIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(HiveUserAuthenticationIT.class); private static final ContainerSuite 
containerSuite = ContainerSuite.getInstance(); @@ -98,7 +98,7 @@ public class HiveUserAuthenticationIT extends AbstractIT { private static final String HIVE_COL_NAME3 = "col3"; @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { containerSuite.startKerberosHiveContainer(); kerberosHiveContainer = containerSuite.getKerberosHiveContainer(); @@ -119,11 +119,11 @@ public static void startIntegrationTest() throws Exception { addKerberosConfig(); // Start Gravitino server - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); } @AfterAll - public static void stop() { + public void stop() { // Reset the UGI UserGroupInformation.reset(); @@ -132,7 +132,7 @@ public static void stop() { System.clearProperty("java.security.krb5.conf"); System.clearProperty("sun.security.krb5.debug"); - AbstractIT.client = null; + client = null; } private static void prepareKerberosConfig() throws Exception { @@ -188,14 +188,12 @@ private static void refreshKerberosConfig() { } } - private static void addKerberosConfig() { - AbstractIT.customConfigs.put(Configs.AUTHENTICATORS.getKey(), "kerberos"); - AbstractIT.customConfigs.put( - "gravitino.authenticator.kerberos.principal", GRAVITINO_SERVER_PRINCIPAL); - AbstractIT.customConfigs.put( - "gravitino.authenticator.kerberos.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); - AbstractIT.customConfigs.put(SDK_KERBEROS_KEYTAB_KEY, TMP_DIR + GRAVITINO_CLIENT_KEYTAB); - AbstractIT.customConfigs.put(SDK_KERBEROS_PRINCIPAL_KEY, GRAVITINO_CLIENT_PRINCIPAL); + private void addKerberosConfig() { + customConfigs.put(Configs.AUTHENTICATORS.getKey(), "kerberos"); + customConfigs.put("gravitino.authenticator.kerberos.principal", GRAVITINO_SERVER_PRINCIPAL); + customConfigs.put("gravitino.authenticator.kerberos.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); + customConfigs.put(SDK_KERBEROS_KEYTAB_KEY, TMP_DIR + GRAVITINO_CLIENT_KEYTAB); + customConfigs.put(SDK_KERBEROS_PRINCIPAL_KEY, GRAVITINO_CLIENT_PRINCIPAL); } @Test diff --git a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/ProxyCatalogHiveIT.java b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/ProxyCatalogHiveIT.java index 24c3c2cf443..d328a44dc64 100644 --- a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/ProxyCatalogHiveIT.java +++ b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/ProxyCatalogHiveIT.java @@ -39,7 +39,7 @@ import org.apache.gravitino.hive.HiveClientPool; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; @@ -64,7 +64,7 @@ import org.junit.jupiter.api.Test; @Tag("gravitino-docker-test") -public class ProxyCatalogHiveIT extends AbstractIT { +public class ProxyCatalogHiveIT extends BaseIT { public static final String METALAKE_NAME = GravitinoITUtils.genRandomName("ProxyCatalogHiveIT_metalake"); @@ -88,10 +88,10 @@ public class ProxyCatalogHiveIT extends AbstractIT { private static GravitinoAdminClient anotherClientWithNotExistingName; private static Catalog anotherCatalog; private static Catalog 
anotherCatalogWithUsername; - private static Catalog anotherCatatlogWithNotExistingName; + private static Catalog anotherCatalogWithNotExistingName; @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { originHadoopUser = System.getenv(HADOOP_USER_NAME); setEnv(HADOOP_USER_NAME, null); @@ -100,7 +100,7 @@ public static void startIntegrationTest() throws Exception { Map configs = Maps.newHashMap(); configs.put(Configs.AUTHENTICATORS.getKey(), AuthenticatorType.SIMPLE.name().toLowerCase()); registerCustomConfigs(configs); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); containerSuite.startHiveContainer(); HIVE_METASTORE_URIS = String.format( @@ -137,13 +137,13 @@ public static void startIntegrationTest() throws Exception { } @AfterAll - public static void stop() { + public void stop() { setEnv(HADOOP_USER_NAME, originHadoopUser); anotherClient.close(); anotherClientWithUsername.close(); anotherClientWithNotExistingName.close(); - AbstractIT.client = null; + client = null; } @Test @@ -195,7 +195,7 @@ public void testOperateSchema() throws Exception { Assertions.assertThrows( RuntimeException.class, () -> - anotherCatatlogWithNotExistingName + anotherCatalogWithNotExistingName .asSchemas() .createSchema("new_schema", comment, properties)); Assertions.assertTrue(e.getMessage().contains("AccessControlException Permission denied")); @@ -256,7 +256,7 @@ public void testOperateTable() throws Exception { Assertions.assertThrows( RuntimeException.class, () -> { - anotherCatatlogWithNotExistingName + anotherCatalogWithNotExistingName .asTableCatalog() .createTable( anotherIdentWithNotExisting, @@ -370,7 +370,7 @@ public void testOperatePartition() throws Exception { Assertions.assertThrows( RuntimeException.class, () -> - anotherCatatlogWithNotExistingName + anotherCatalogWithNotExistingName .asTableCatalog() .loadTable(nameIdentifier) .supportPartitions() @@ -385,7 +385,7 @@ private Column[] createColumns() { return new Column[] {col1, col2, col3}; } - private static void createMetalake() { + private void createMetalake() { GravitinoMetalake[] gravitinoMetalakes = client.listMetalakes(); Assertions.assertEquals(0, gravitinoMetalakes.length); @@ -421,7 +421,7 @@ private static void loadCatalogWithAnotherClient() { anotherCatalogWithUsername = anotherClientWithUsername.loadMetalake(METALAKE_NAME).loadCatalog(CATALOG_NAME); - anotherCatatlogWithNotExistingName = + anotherCatalogWithNotExistingName = anotherClientWithNotExistingName.loadMetalake(METALAKE_NAME).loadCatalog(CATALOG_NAME); } diff --git a/catalogs/catalog-jdbc-common/src/main/java/org/apache/gravitino/catalog/jdbc/operation/JdbcTableOperations.java b/catalogs/catalog-jdbc-common/src/main/java/org/apache/gravitino/catalog/jdbc/operation/JdbcTableOperations.java index e65926fd0c2..e9b6bf6abc2 100644 --- a/catalogs/catalog-jdbc-common/src/main/java/org/apache/gravitino/catalog/jdbc/operation/JdbcTableOperations.java +++ b/catalogs/catalog-jdbc-common/src/main/java/org/apache/gravitino/catalog/jdbc/operation/JdbcTableOperations.java @@ -168,6 +168,15 @@ protected JdbcTable.Builder getTableBuilder( return builder; } + protected JdbcColumn.Builder getColumnBuilder( + ResultSet columnsResult, String databaseName, String tableName) throws SQLException { + JdbcColumn.Builder builder = null; + if (Objects.equals(columnsResult.getString("TABLE_NAME"), tableName)) { + builder = getBasicJdbcColumnInfo(columnsResult); + } + return builder; + } + @Override 
public JdbcTable load(String databaseName, String tableName) throws NoSuchTableException { // We should handle case sensitivity and wild card issue in some catalog tables, take MySQL @@ -188,8 +197,8 @@ public JdbcTable load(String databaseName, String tableName) throws NoSuchTableE ResultSet columns = getColumns(connection, databaseName, tableName); while (columns.next()) { // TODO(yunqing): check schema and catalog also - if (Objects.equals(columns.getString("TABLE_NAME"), tableName)) { - JdbcColumn.Builder columnBuilder = getBasicJdbcColumnInfo(columns); + JdbcColumn.Builder columnBuilder = getColumnBuilder(columns, databaseName, tableName); + if (columnBuilder != null) { boolean autoIncrement = getAutoIncrementInfo(columns); columnBuilder.withAutoIncrement(autoIncrement); jdbcColumns.add(columnBuilder.build()); diff --git a/catalogs/catalog-jdbc-doris/src/main/java/org/apache/gravitino/catalog/doris/operation/DorisTableOperations.java b/catalogs/catalog-jdbc-doris/src/main/java/org/apache/gravitino/catalog/doris/operation/DorisTableOperations.java index 479e3e5dca6..ebd7027b168 100644 --- a/catalogs/catalog-jdbc-doris/src/main/java/org/apache/gravitino/catalog/doris/operation/DorisTableOperations.java +++ b/catalogs/catalog-jdbc-doris/src/main/java/org/apache/gravitino/catalog/doris/operation/DorisTableOperations.java @@ -179,7 +179,7 @@ private Map appendNecessaryProperties(Map proper // If the backend server is less than DEFAULT_REPLICATION_FACTOR_IN_SERVER_SIDE (3), we need to // set the property 'replication_num' to 1 explicitly. - if (!properties.containsKey(REPLICATION_FACTOR)) { + if (!resultMap.containsKey(REPLICATION_FACTOR)) { // Try to check the number of backend servers. String query = "select count(*) from information_schema.backends where Alive = 'true'"; diff --git a/catalogs/catalog-jdbc-doris/src/test/java/org/apache/gravitino/catalog/doris/integration/test/CatalogDorisIT.java b/catalogs/catalog-jdbc-doris/src/test/java/org/apache/gravitino/catalog/doris/integration/test/CatalogDorisIT.java index d50020c8156..a6059a56e74 100644 --- a/catalogs/catalog-jdbc-doris/src/test/java/org/apache/gravitino/catalog/doris/integration/test/CatalogDorisIT.java +++ b/catalogs/catalog-jdbc-doris/src/test/java/org/apache/gravitino/catalog/doris/integration/test/CatalogDorisIT.java @@ -47,7 +47,7 @@ import org.apache.gravitino.exceptions.SchemaAlreadyExistsException; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.DorisContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.gravitino.rel.Column; @@ -78,13 +78,10 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.TestInstance.Lifecycle; import org.testcontainers.shaded.org.awaitility.Awaitility; @Tag("gravitino-docker-test") -@TestInstance(Lifecycle.PER_CLASS) -public class CatalogDorisIT extends AbstractIT { +public class CatalogDorisIT extends BaseIT { private static final String provider = "jdbc-doris"; @@ -129,7 +126,7 @@ public void startup() throws IOException { public void stop() { clearTableAndSchema(); metalake.dropCatalog(catalogName); - AbstractIT.client.dropMetalake(metalakeName); + 
client.dropMetalake(metalakeName); } @AfterEach @@ -143,12 +140,12 @@ private void clearTableAndSchema() { } private void createMetalake() { - GravitinoMetalake[] gravitinoMetaLakes = AbstractIT.client.listMetalakes(); + GravitinoMetalake[] gravitinoMetaLakes = client.listMetalakes(); assertEquals(0, gravitinoMetaLakes.length); GravitinoMetalake createdMetalake = - AbstractIT.client.createMetalake(metalakeName, "comment", Collections.emptyMap()); - GravitinoMetalake loadMetalake = AbstractIT.client.loadMetalake(metalakeName); + client.createMetalake(metalakeName, "comment", Collections.emptyMap()); + GravitinoMetalake loadMetalake = client.loadMetalake(metalakeName); assertEquals(createdMetalake, loadMetalake); metalake = loadMetalake; diff --git a/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/AuditCatalogMysqlIT.java b/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/AuditCatalogMysqlIT.java index b2cf571475f..a70b7007050 100644 --- a/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/AuditCatalogMysqlIT.java +++ b/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/AuditCatalogMysqlIT.java @@ -36,7 +36,7 @@ import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.MySQLContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.integration.test.util.TestDatabaseName; import org.apache.gravitino.rel.Column; @@ -50,7 +50,7 @@ import org.junit.jupiter.api.Test; @Tag("gravitino-docker-test") -public class AuditCatalogMysqlIT extends AbstractIT { +public class AuditCatalogMysqlIT extends BaseIT { private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); public static final String metalakeName = GravitinoITUtils.genRandomName("audit_mysql_metalake"); private static final String expectUser = System.getProperty("user.name"); @@ -62,11 +62,11 @@ public class AuditCatalogMysqlIT extends AbstractIT { private static GravitinoMetalake metalake; @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { Map configs = Maps.newHashMap(); configs.put(Configs.AUTHENTICATORS.getKey(), AuthenticatorType.SIMPLE.name().toLowerCase()); registerCustomConfigs(configs); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); containerSuite.startMySQLContainer(TestDatabaseName.MYSQL_AUDIT_CATALOG_MYSQL_IT); MYSQL_CONTAINER = containerSuite.getMySQLContainer(); @@ -76,10 +76,10 @@ public static void startIntegrationTest() throws Exception { } @AfterAll - public static void stopIntegrationTest() throws IOException, InterruptedException { + public void stopIntegrationTest() throws IOException, InterruptedException { client.dropMetalake(metalakeName); mysqlService.close(); - AbstractIT.stopIntegrationTest(); + super.stopIntegrationTest(); } @Test @@ -165,7 +165,7 @@ private static Catalog createCatalog(String catalogName) throws SQLException { catalogName, Catalog.Type.RELATIONAL, provider, "comment", catalogProperties); } - private static void createMetalake() { + private void createMetalake() { GravitinoMetalake[] gravitinoMetalakes = 
client.listMetalakes(); Assertions.assertEquals(0, gravitinoMetalakes.length); diff --git a/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java b/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java index 71003720522..fe43538e3b5 100644 --- a/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java +++ b/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java @@ -50,7 +50,7 @@ import org.apache.gravitino.exceptions.SchemaAlreadyExistsException; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.MySQLContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.gravitino.integration.test.util.TestDatabaseName; @@ -84,7 +84,7 @@ @Tag("gravitino-docker-test") @TestInstance(Lifecycle.PER_CLASS) -public class CatalogMysqlIT extends AbstractIT { +public class CatalogMysqlIT extends BaseIT { private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); private static final String provider = "jdbc-mysql"; diff --git a/catalogs/catalog-jdbc-postgresql/src/main/java/org/apache/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java b/catalogs/catalog-jdbc-postgresql/src/main/java/org/apache/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java index 639544105e6..775687abd47 100644 --- a/catalogs/catalog-jdbc-postgresql/src/main/java/org/apache/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java +++ b/catalogs/catalog-jdbc-postgresql/src/main/java/org/apache/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java @@ -108,6 +108,17 @@ protected JdbcTable.Builder getTableBuilder( return builder; } + @Override + protected JdbcColumn.Builder getColumnBuilder( + ResultSet columnsResult, String databaseName, String tableName) throws SQLException { + JdbcColumn.Builder builder = null; + if (Objects.equals(columnsResult.getString("TABLE_NAME"), tableName) + && Objects.equals(columnsResult.getString("TABLE_SCHEM"), databaseName)) { + builder = getBasicJdbcColumnInfo(columnsResult); + } + return builder; + } + @Override protected String generateCreateTableSql( String tableName, diff --git a/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/CatalogPostgreSqlIT.java b/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/CatalogPostgreSqlIT.java index 5b7ec298f89..c657b04ca74 100644 --- a/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/CatalogPostgreSqlIT.java +++ b/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/CatalogPostgreSqlIT.java @@ -49,7 +49,7 @@ import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.PGImageName; import org.apache.gravitino.integration.test.container.PostgreSQLContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import 
org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.gravitino.integration.test.util.TestDatabaseName; @@ -82,7 +82,7 @@ @Tag("gravitino-docker-test") @TestInstance(Lifecycle.PER_CLASS) -public class CatalogPostgreSqlIT extends AbstractIT { +public class CatalogPostgreSqlIT extends BaseIT { private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); public static final PGImageName DEFAULT_POSTGRES_IMAGE = PGImageName.VERSION_13; diff --git a/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/TestMultipleJDBCLoad.java b/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/TestMultipleJDBCLoad.java index 8dc4466eff5..be16ee79485 100644 --- a/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/TestMultipleJDBCLoad.java +++ b/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/TestMultipleJDBCLoad.java @@ -32,7 +32,7 @@ import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.MySQLContainer; import org.apache.gravitino.integration.test.container.PostgreSQLContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.TestDatabaseName; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.types.Types; @@ -43,7 +43,7 @@ import org.junit.jupiter.api.Test; @Tag("gravitino-docker-test") -public class TestMultipleJDBCLoad extends AbstractIT { +public class TestMultipleJDBCLoad extends BaseIT { private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); private static final TestDatabaseName TEST_DB_NAME = TestDatabaseName.PG_TEST_PG_CATALOG_MULTIPLE_JDBC_LOAD; @@ -52,7 +52,7 @@ public class TestMultipleJDBCLoad extends AbstractIT { private static PostgreSQLContainer postgreSQLContainer; @BeforeAll - public static void startup() throws IOException { + public void startup() throws IOException { containerSuite.startMySQLContainer(TEST_DB_NAME); mySQLContainer = containerSuite.getMySQLContainer(); containerSuite.startPostgreSQLContainer(TEST_DB_NAME); diff --git a/catalogs/catalog-kafka/build.gradle.kts b/catalogs/catalog-kafka/build.gradle.kts index fe8e6086f46..6a8c104c726 100644 --- a/catalogs/catalog-kafka/build.gradle.kts +++ b/catalogs/catalog-kafka/build.gradle.kts @@ -97,15 +97,6 @@ tasks.getByName("generateMetadataFileForMavenJavaPublication") { } tasks.test { - doFirst { - val testMode = project.properties["testMode"] as? 
String ?: "embedded" - if (testMode == "deploy") { - environment("GRAVITINO_HOME", project.rootDir.path + "/distribution/package") - } else if (testMode == "embedded") { - environment("GRAVITINO_HOME", project.rootDir.path) - } - } - val skipITs = project.hasProperty("skipITs") if (skipITs) { // Exclude integration tests diff --git a/catalogs/catalog-kafka/src/test/java/org/apache/gravitino/catalog/kafka/integration/test/CatalogKafkaIT.java b/catalogs/catalog-kafka/src/test/java/org/apache/gravitino/catalog/kafka/integration/test/CatalogKafkaIT.java index b73a6c1b86a..baa83619873 100644 --- a/catalogs/catalog-kafka/src/test/java/org/apache/gravitino/catalog/kafka/integration/test/CatalogKafkaIT.java +++ b/catalogs/catalog-kafka/src/test/java/org/apache/gravitino/catalog/kafka/integration/test/CatalogKafkaIT.java @@ -41,7 +41,7 @@ import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.exceptions.NoSuchCatalogException; import org.apache.gravitino.integration.test.container.ContainerSuite; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.messaging.Topic; import org.apache.gravitino.messaging.TopicChange; @@ -72,7 +72,7 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class CatalogKafkaIT extends AbstractIT { +public class CatalogKafkaIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(CatalogKafkaIT.class); private static final ContainerSuite CONTAINER_SUITE = ContainerSuite.getInstance(); private static final String METALAKE_NAME = @@ -87,7 +87,7 @@ public class CatalogKafkaIT extends AbstractIT { private static AdminClient adminClient; @BeforeAll - public static void startUp() throws ExecutionException, InterruptedException { + public void startUp() throws ExecutionException, InterruptedException { CONTAINER_SUITE.startKafkaContainer(); kafkaBootstrapServers = String.format( @@ -112,7 +112,7 @@ public static void startUp() throws ExecutionException, InterruptedException { } @AfterAll - public static void shutdown() { + public void shutdown() { Catalog catalog = metalake.loadCatalog(CATALOG_NAME); Arrays.stream(catalog.asSchemas().listSchemas()) .filter(ident -> !ident.equals("default")) @@ -552,7 +552,7 @@ private TopicDescription getTopicDesc(String topicName) .get(); } - private static void createMetalake() { + private void createMetalake() { GravitinoMetalake createdMetalake = client.createMetalake(METALAKE_NAME, "comment", Collections.emptyMap()); GravitinoMetalake loadMetalake = client.loadMetalake(METALAKE_NAME); diff --git a/catalogs/catalog-lakehouse-hudi/build.gradle.kts b/catalogs/catalog-lakehouse-hudi/build.gradle.kts index eef90f02957..814965ec038 100644 --- a/catalogs/catalog-lakehouse-hudi/build.gradle.kts +++ b/catalogs/catalog-lakehouse-hudi/build.gradle.kts @@ -27,55 +27,38 @@ plugins { val scalaVersion: String = project.properties["scalaVersion"] as? 
String ?: extra["defaultScalaVersion"].toString() val fullSparkVersion: String = libs.versions.spark34.get() val sparkVersion = fullSparkVersion.split(".").take(2).joinToString(".") +val hudiVersion = libs.versions.hudi.get() dependencies { implementation(project(":api")) { - exclude(group = "*") + exclude("*") } implementation(project(":common")) { - exclude(group = "*") + exclude("*") } implementation(project(":catalogs:hive-metastore-common")) implementation(project(":core")) { - exclude(group = "*") + exclude("*") } + implementation(libs.commons.collections3) + implementation(libs.commons.configuration1) + implementation(libs.htrace.core4) implementation(libs.guava) - implementation(libs.hive2.exec) { - artifact { - classifier = "core" - } - exclude("com.google.code.findbugs", "jsr305") - exclude("com.google.protobuf") - exclude("org.apache.avro") - exclude("org.apache.ant") - exclude("org.apache.calcite") - exclude("org.apache.calcite.avatica") - exclude("org.apache.curator") - exclude("org.apache.derby") - exclude("org.apache.hadoop", "hadoop-yarn-server-resourcemanager") - exclude("org.apache.hive", "hive-llap-tez") - exclude("org.apache.hive", "hive-vector-code-gen") - exclude("org.apache.ivy") - exclude("org.apache.logging.log4j") - exclude("org.apache.zookeeper") - exclude("org.codehaus.groovy", "groovy-all") - exclude("org.datanucleus", "datanucleus-core") - exclude("org.eclipse.jetty.aggregate", "jetty-all") - exclude("org.eclipse.jetty.orbit", "javax.servlet") - exclude("org.openjdk.jol") - exclude("org.pentaho") - exclude("org.slf4j") + implementation(libs.hadoop2.auth) { + exclude("*") } + implementation(libs.woodstox.core) implementation(libs.hive2.metastore) { exclude("ant") exclude("co.cask.tephra") + exclude("com.fasterxml.jackson.core", "jackson-core") exclude("com.github.joshelser") exclude("com.google.code.findbugs", "jsr305") exclude("com.google.code.findbugs", "sr305") exclude("com.tdunning", "json") exclude("com.zaxxer", "HikariCP") - exclude("io.dropwizard.metricss") + exclude("io.dropwizard.metrics") exclude("javax.transaction", "transaction-api") exclude("org.apache.ant") exclude("org.apache.avro") @@ -95,16 +78,29 @@ dependencies { implementation(libs.hadoop2.common) { exclude("*") } + implementation(libs.hadoop2.mapreduce.client.core) { + exclude("*") + } implementation(libs.slf4j.api) - implementation(libs.thrift) compileOnly(libs.lombok) annotationProcessor(libs.lombok) testImplementation(project(":catalogs:hive-metastore-common", "testArtifacts")) + testImplementation(project(":clients:client-java")) { + exclude("org.apache.logging.log4j") + } + testImplementation(project(":integration-test-common", "testArtifacts")) + testImplementation(project(":server")) { + exclude("org.apache.logging.log4j") + } + testImplementation(project(":server-common")) { + exclude("org.apache.logging.log4j") + } - testImplementation(libs.bundles.log4j) + testImplementation(libs.bundles.jetty) + testImplementation(libs.bundles.jersey) testImplementation(libs.commons.collections3) testImplementation(libs.commons.configuration1) testImplementation(libs.datanucleus.core) @@ -115,12 +111,29 @@ dependencies { testImplementation(libs.hadoop2.auth) { exclude("*") } + testImplementation(libs.hadoop2.hdfs) testImplementation(libs.hadoop2.mapreduce.client.core) { exclude("*") } testImplementation(libs.htrace.core4) testImplementation(libs.junit.jupiter.api) - testImplementation(libs.woodstox.core) + testImplementation(libs.mysql.driver) + testImplementation(libs.postgresql.driver) + 
testImplementation(libs.prometheus.dropwizard) + testImplementation("org.apache.spark:spark-hive_$scalaVersion:$fullSparkVersion") { + exclude("org.apache.hadoop") + exclude("io.dropwizard.metrics") + exclude("com.fasterxml.jackson.core") + exclude("com.fasterxml.jackson.module", "jackson-module-scala_2.12") + } + testImplementation("org.apache.spark:spark-sql_$scalaVersion:$fullSparkVersion") { + exclude("org.apache.avro") + exclude("org.apache.hadoop") + exclude("org.apache.zookeeper") + exclude("io.dropwizard.metrics") + exclude("org.rocksdb") + } + testImplementation(libs.testcontainers) testImplementation("org.apache.spark:spark-hive_$scalaVersion:$fullSparkVersion") { exclude("org.apache.hadoop") exclude("io.dropwizard.metrics") exclude("com.fasterxml.jackson.core") exclude("com.fasterxml.jackson.module", "jackson-module-scala_2.12") } @@ -135,6 +148,63 @@ dependencies { exclude("org.rocksdb") } - testRuntimeOnly("org.apache.hudi:hudi-spark$sparkVersion-bundle_$scalaVersion:0.15.0") + testRuntimeOnly("org.apache.hudi:hudi-spark$sparkVersion-bundle_$scalaVersion:$hudiVersion") testRuntimeOnly(libs.junit.jupiter.engine) } + +tasks { + val runtimeJars by registering(Copy::class) { + from(configurations.runtimeClasspath) + into("build/libs") + } + + val copyCatalogLibs by registering(Copy::class) { + dependsOn("jar", "runtimeJars") + from("build/libs") { + exclude("guava-*.jar") + exclude("log4j-*.jar") + exclude("slf4j-*.jar") + } + into("$rootDir/distribution/package/catalogs/lakehouse-hudi/libs") + } + + val copyCatalogConfig by registering(Copy::class) { + from("src/main/resources") + into("$rootDir/distribution/package/catalogs/lakehouse-hudi/conf") + + include("lakehouse-hudi.conf") + include("hive-site.xml.template") + + rename { original -> + if (original.endsWith(".template")) { + original.replace(".template", "") + } else { + original + } + } + + exclude { details -> + details.file.isDirectory() + } + + fileMode = 0b111101101 + } + + register("copyLibAndConfig", Copy::class) { + dependsOn(copyCatalogLibs, copyCatalogConfig) + } +} + +tasks.test { + val skipITs = project.hasProperty("skipITs") + if (skipITs) { + // Exclude integration tests + exclude("**/integration/test/**") + } else { + dependsOn(tasks.jar) + } +} + +tasks.getByName("generateMetadataFileForMavenJavaPublication") { + dependsOn("runtimeJars") +} diff --git a/catalogs/catalog-lakehouse-hudi/src/main/java/org/apache/gravitino/catalog/lakehouse/hudi/HudiCatalogOperations.java b/catalogs/catalog-lakehouse-hudi/src/main/java/org/apache/gravitino/catalog/lakehouse/hudi/HudiCatalogOperations.java index c2b68d11d7e..f73927233a6 100644 --- a/catalogs/catalog-lakehouse-hudi/src/main/java/org/apache/gravitino/catalog/lakehouse/hudi/HudiCatalogOperations.java +++ b/catalogs/catalog-lakehouse-hudi/src/main/java/org/apache/gravitino/catalog/lakehouse/hudi/HudiCatalogOperations.java @@ -92,7 +92,8 @@ public void testConnection( Map properties) throws Exception { try { - hudiCatalogBackendOps.listSchemas(null); + hudiCatalogBackendOps.listSchemas( + Namespace.of(catalogIdent.namespace().level(0), catalogIdent.name())); } catch (Exception e) { throw new ConnectionFailedException( e, "Failed to run listSchemas on Hudi catalog: %s", e.getMessage()); diff --git a/catalogs/catalog-lakehouse-hudi/src/main/resources/hive-site.xml.template b/catalogs/catalog-lakehouse-hudi/src/main/resources/hive-site.xml.template new file mode 100644 index 00000000000..efa7db5adb6 --- /dev/null +++ b/catalogs/catalog-lakehouse-hudi/src/main/resources/hive-site.xml.template @@ -0,0 +1,21 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. +--> +<configuration> +</configuration> diff --git
a/catalogs/catalog-lakehouse-hudi/src/main/resources/lakehouse-hudi.conf b/catalogs/catalog-lakehouse-hudi/src/main/resources/lakehouse-hudi.conf new file mode 100644 index 00000000000..ebab7ce76d6 --- /dev/null +++ b/catalogs/catalog-lakehouse-hudi/src/main/resources/lakehouse-hudi.conf @@ -0,0 +1,22 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# This file holds common configurations for the Lakehouse-hudi catalog. The format of the key is +# 'gravitino.bypass.{hudi-inner-config-key}' and `hudi-inner-config-key` is the +# real key that is passed to the Lakehouse-hudi catalog. diff --git a/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/TestHudiCatalogOperations.java b/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/TestHudiCatalogOperations.java index 16595da6aab..01e6166476b 100644 --- a/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/TestHudiCatalogOperations.java +++ b/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/TestHudiCatalogOperations.java @@ -75,7 +75,9 @@ public void testTestConnection() throws Exception { InMemoryBackendOps inMemoryBackendOps = new InMemoryBackendOps()) { ops.hudiCatalogBackendOps = inMemoryBackendOps; - Assertions.assertDoesNotThrow(() -> ops.testConnection(null, null, null, null, null)); + Assertions.assertDoesNotThrow( + () -> + ops.testConnection(NameIdentifier.of("metalake", "catalog"), null, null, null, null)); } } diff --git a/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/integration/test/HudiCatalogHMSIT.java b/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/integration/test/HudiCatalogHMSIT.java new file mode 100644 index 00000000000..a3d07125f3b --- /dev/null +++ b/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/integration/test/HudiCatalogHMSIT.java @@ -0,0 +1,503 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.catalog.lakehouse.hudi.integration.test; + +import static org.apache.gravitino.catalog.lakehouse.hudi.HudiCatalogPropertiesMetadata.CATALOG_BACKEND; +import static org.apache.gravitino.catalog.lakehouse.hudi.HudiCatalogPropertiesMetadata.URI; +import static org.apache.gravitino.catalog.lakehouse.hudi.HudiSchemaPropertiesMetadata.LOCATION; + +import com.google.common.collect.ImmutableMap; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.gravitino.Catalog; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.Namespace; +import org.apache.gravitino.Schema; +import org.apache.gravitino.SchemaChange; +import org.apache.gravitino.SupportsSchemas; +import org.apache.gravitino.client.GravitinoMetalake; +import org.apache.gravitino.dto.rel.ColumnDTO; +import org.apache.gravitino.integration.test.container.ContainerSuite; +import org.apache.gravitino.integration.test.container.HiveContainer; +import org.apache.gravitino.integration.test.util.BaseIT; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.Table; +import org.apache.gravitino.rel.TableCatalog; +import org.apache.gravitino.rel.TableChange; +import org.apache.gravitino.rel.expressions.distributions.Distributions; +import org.apache.gravitino.rel.expressions.sorts.SortOrders; +import org.apache.gravitino.rel.expressions.transforms.Transform; +import org.apache.gravitino.rel.expressions.transforms.Transforms; +import org.apache.gravitino.rel.indexes.Indexes; +import org.apache.gravitino.rel.types.Types; +import org.apache.gravitino.utils.RandomNameUtils; +import org.apache.spark.sql.SparkSession; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +@Tag("gravitino-docker-test") +public class HudiCatalogHMSIT extends BaseIT { + private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); + + private static String hmsURI; + private static SparkSession sparkSession; + private static GravitinoMetalake metalake; + private static Catalog catalog; + private static final String METALAKE_NAME = RandomNameUtils.genRandomName("hudi_metalake"); + private static final String CATALOG_NAME = RandomNameUtils.genRandomName("hudi_catalog"); + private static final String DB_NAME = RandomNameUtils.genRandomName("hudi_schema"); + private static final String DB_LOCATION = "/user/hive/warehouse-catalog-hudi/" + DB_NAME; + private static final String DATA_TABLE_NAME = RandomNameUtils.genRandomName("hudi_data_table"); + private static final String COW_TABLE = RandomNameUtils.genRandomName("hudi_cow_table"); + private static final String MOR_TABLE = RandomNameUtils.genRandomName("hudi_mor_table"); + + @BeforeAll + public void prepare() { + containerSuite.startHiveContainer(); + hmsURI = + String.format( + "thrift://%s:%d", + containerSuite.getHiveContainer().getContainerIpAddress(), + HiveContainer.HIVE_METASTORE_PORT); + + createHudiTables(); + + metalake = + client.createMetalake(METALAKE_NAME, "metalake for hudi catalog IT", ImmutableMap.of()); + catalog = + metalake.createCatalog( + CATALOG_NAME, + Catalog.Type.RELATIONAL, + "lakehouse-hudi", + "hudi catalog for hms", + ImmutableMap.of(CATALOG_BACKEND, "hms", URI, hmsURI)); + } + + @Test + public void testCatalog() { + String 
catalogName = RandomNameUtils.genRandomName("hudi_catalog"); + String comment = "hudi catalog for hms"; + // test create exception + Exception exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + metalake.createCatalog( + catalogName, + Catalog.Type.RELATIONAL, + "lakehouse-hudi", + comment, + ImmutableMap.of())); + Assertions.assertTrue( + exception + .getMessage() + .contains("Properties are required and must be set: [catalog-backend, uri]"), + "Unexpected exception message: " + exception.getMessage()); + + // test testConnection exception + exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + metalake.testConnection( + catalogName, + Catalog.Type.RELATIONAL, + "lakehouse-hudi", + comment, + ImmutableMap.of())); + Assertions.assertTrue( + exception + .getMessage() + .contains("Properties are required and must be set: [catalog-backend, uri]"), + "Unexpected exception message: " + exception.getMessage()); + + // test testConnection + ImmutableMap properties = ImmutableMap.of(CATALOG_BACKEND, "hms", URI, hmsURI); + Assertions.assertDoesNotThrow( + () -> + metalake.testConnection( + catalogName, Catalog.Type.RELATIONAL, "lakehouse-hudi", comment, properties)); + + // test create and load + metalake.createCatalog( + catalogName, Catalog.Type.RELATIONAL, "lakehouse-hudi", comment, properties); + Catalog catalog = metalake.loadCatalog(catalogName); + Assertions.assertEquals(catalogName, catalog.name()); + Assertions.assertEquals(Catalog.Type.RELATIONAL, catalog.type()); + Assertions.assertEquals("lakehouse-hudi", catalog.provider()); + Assertions.assertEquals(comment, catalog.comment()); + Assertions.assertEquals(properties, catalog.properties()); + + // test list + String[] catalogs = metalake.listCatalogs(); + Assertions.assertTrue(Arrays.asList(catalogs).contains(catalogName)); + } + + @Test + public void testSchema() { + SupportsSchemas schemaOps = catalog.asSchemas(); + String schemaName = RandomNameUtils.genRandomName("hudi_schema"); + // test create + Exception exception = + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> schemaOps.createSchema(schemaName, null, ImmutableMap.of())); + Assertions.assertTrue( + exception.getMessage().contains("Not implemented yet"), + "Unexpected exception message: " + exception.getMessage()); + + // test alter + exception = + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> schemaOps.alterSchema(schemaName, SchemaChange.removeProperty("test"))); + Assertions.assertTrue( + exception.getMessage().contains("Not implemented yet"), + "Unexpected exception message: " + exception.getMessage()); + + // test list + String[] schemas = schemaOps.listSchemas(); + Assertions.assertTrue(Arrays.asList(schemas).contains(DB_NAME)); + + // test load + Schema schema = schemaOps.loadSchema(DB_NAME); + Assertions.assertEquals(DB_NAME, schema.name()); + Assertions.assertEquals("", schema.comment()); + Assertions.assertTrue(schema.properties().get(LOCATION).endsWith(DB_NAME)); + } + + @Test + public void testTable() { + TableCatalog tableOps = catalog.asTableCatalog(); + String tableName = RandomNameUtils.genRandomName("hudi_table"); + NameIdentifier tableIdent = NameIdentifier.of(DB_NAME, tableName); + + // test create + Exception exception = + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + tableOps.createTable( + tableIdent, + new Column[] {Column.of("col1", Types.StringType.get())}, + null, + null)); + Assertions.assertTrue( + 
exception.getMessage().contains("Not implemented yet"), + "Unexpected exception message: " + exception.getMessage()); + + // test alter + exception = + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> tableOps.alterTable(tableIdent, TableChange.updateComment("new comment"))); + Assertions.assertTrue( + exception.getMessage().contains("Not implemented yet"), + "Unexpected exception message: " + exception.getMessage()); + + // test list + NameIdentifier[] tables = tableOps.listTables(Namespace.of(DB_NAME)); + List tableNames = + Arrays.stream(tables).map(NameIdentifier::name).collect(Collectors.toList()); + + Assertions.assertTrue(tableNames.contains(DATA_TABLE_NAME)); + + Assertions.assertTrue(tableNames.contains(COW_TABLE)); + Assertions.assertFalse(tableNames.contains(COW_TABLE + "_rt")); + Assertions.assertFalse(tableNames.contains(COW_TABLE + "_ro")); + + Assertions.assertTrue(tableNames.contains(MOR_TABLE)); + Assertions.assertTrue(tableNames.contains(MOR_TABLE + "_rt")); + Assertions.assertTrue(tableNames.contains(MOR_TABLE + "_ro")); + + // test load + Table table = tableOps.loadTable(NameIdentifier.of(DB_NAME, COW_TABLE)); + Assertions.assertEquals(COW_TABLE, table.name()); + assertTable(table); + + table = tableOps.loadTable(NameIdentifier.of(DB_NAME, MOR_TABLE)); + Assertions.assertEquals(MOR_TABLE, table.name()); + assertTable(table); + + table = tableOps.loadTable(NameIdentifier.of(DB_NAME, MOR_TABLE + "_rt")); + Assertions.assertEquals(MOR_TABLE + "_rt", table.name()); + assertTable(table); + + table = tableOps.loadTable(NameIdentifier.of(DB_NAME, MOR_TABLE + "_ro")); + Assertions.assertEquals(MOR_TABLE + "_ro", table.name()); + assertTable(table); + } + + private void assertTable(Table table) { + Assertions.assertNull(table.comment()); + assertColumns(table); + assertProperties(table); + assertPartitioning(table.partitioning()); + Assertions.assertEquals(Distributions.NONE, table.distribution()); + Assertions.assertEquals(SortOrders.NONE, table.sortOrder()); + Assertions.assertEquals(Indexes.EMPTY_INDEXES, table.index()); + } + + private void assertPartitioning(Transform[] partitioning) { + Assertions.assertEquals(1, partitioning.length); + Assertions.assertEquals(Transforms.identity("city"), partitioning[0]); + } + + private void assertProperties(Table table) { + Map properties = table.properties(); + Assertions.assertTrue(properties.containsKey("last_commit_time_sync")); + Assertions.assertTrue(properties.containsKey("last_commit_completion_time_sync")); + Assertions.assertTrue(properties.containsKey("transient_lastDdlTime")); + Assertions.assertTrue(properties.containsKey("spark.sql.sources.schema.numParts")); + Assertions.assertTrue(properties.containsKey("spark.sql.sources.schema.part.0")); + Assertions.assertTrue(properties.containsKey("spark.sql.sources.schema.partCol.0")); + Assertions.assertTrue(properties.containsKey("spark.sql.sources.schema.numPartCols")); + Assertions.assertTrue(properties.containsKey("spark.sql.sources.provider")); + Assertions.assertTrue(properties.containsKey("spark.sql.create.version")); + + if (table.name().endsWith("_rt") || table.name().endsWith("_ro")) { + Assertions.assertEquals("TRUE", properties.get("EXTERNAL")); + } else { + Assertions.assertTrue(properties.containsKey("type")); + Assertions.assertTrue(properties.containsKey("provider")); + } + } + + private void assertColumns(Table table) { + Column[] columns = table.columns(); + Assertions.assertEquals(11, columns.length); + if (table.name().endsWith("_rt") || 
table.name().endsWith("_ro")) { + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_commit_time") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[0]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_commit_seqno") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[1]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_record_key") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[2]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_partition_path") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[3]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_file_name") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[4]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("ts") + .withDataType(Types.LongType.get()) + .withComment("") + .build(), + columns[5]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("uuid") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[6]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("rider") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[7]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("driver") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[8]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("fare") + .withDataType(Types.DoubleType.get()) + .withComment("") + .build(), + columns[9]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("city") + .withDataType(Types.StringType.get()) + .withComment("") + .build(), + columns[10]); + } else { + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_commit_time") + .withDataType(Types.StringType.get()) + .build(), + columns[0]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_commit_seqno") + .withDataType(Types.StringType.get()) + .build(), + columns[1]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_record_key") + .withDataType(Types.StringType.get()) + .build(), + columns[2]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_partition_path") + .withDataType(Types.StringType.get()) + .build(), + columns[3]); + Assertions.assertEquals( + ColumnDTO.builder() + .withName("_hoodie_file_name") + .withDataType(Types.StringType.get()) + .build(), + columns[4]); + Assertions.assertEquals( + ColumnDTO.builder().withName("ts").withDataType(Types.LongType.get()).build(), + columns[5]); + Assertions.assertEquals( + ColumnDTO.builder().withName("uuid").withDataType(Types.StringType.get()).build(), + columns[6]); + Assertions.assertEquals( + ColumnDTO.builder().withName("rider").withDataType(Types.StringType.get()).build(), + columns[7]); + Assertions.assertEquals( + ColumnDTO.builder().withName("driver").withDataType(Types.StringType.get()).build(), + columns[8]); + Assertions.assertEquals( + ColumnDTO.builder().withName("fare").withDataType(Types.DoubleType.get()).build(), + columns[9]); + Assertions.assertEquals( + ColumnDTO.builder().withName("city").withDataType(Types.StringType.get()).build(), + columns[10]); + } + } + + private static void createHudiTables() { + sparkSession = + SparkSession.builder() + .master("local[1]") + .appName("Hudi Catalog integration test") + .config("hive.metastore.uris", hmsURI) + .config( 
+ "spark.sql.warehouse.dir", + String.format( + "hdfs://%s:%d/user/hive/warehouse-catalog-hudi", + containerSuite.getHiveContainer().getContainerIpAddress(), + HiveContainer.HDFS_DEFAULTFS_PORT)) + .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") + .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension") + .config( + "spark.sql.catalog.spark_catalog", + "org.apache.spark.sql.hudi.catalog.HoodieCatalog") + .config("spark.kryo.registrator", "org.apache.spark.HoodieSparkKryoRegistrar") + .config("dfs.replication", "1") + .enableHiveSupport() + .getOrCreate(); + + sparkSession.sql( + String.format("CREATE DATABASE IF NOT EXISTS %s LOCATION '%s'", DB_NAME, DB_LOCATION)); + + sparkSession.sql( + String.format( + "CREATE TABLE %s.%s (\n" + + " ts BIGINT,\n" + + " uuid STRING,\n" + + " rider STRING,\n" + + " driver STRING,\n" + + " fare DOUBLE,\n" + + " city STRING\n" + + ") USING HUDI TBLPROPERTIES (type = 'cow') \n" + + "PARTITIONED BY (city)", + DB_NAME, COW_TABLE)); + sparkSession.sql( + String.format( + "INSERT INTO %s.%s\n" + + "VALUES\n" + + "(1695115999911,'c8abbe79-8d89-47ea-b4ce-4d224bae5bfa','rider-J','driver-T',17.85,'chennai')", + DB_NAME, COW_TABLE)); + + sparkSession.sql( + String.format( + "CREATE TABLE %s.%s (\n" + + " ts BIGINT,\n" + + " uuid STRING,\n" + + " rider STRING,\n" + + " driver STRING,\n" + + " fare DOUBLE,\n" + + " city STRING\n" + + ") USING HUDI TBLPROPERTIES (type = 'mor') \n" + + "PARTITIONED BY (city)", + DB_NAME, MOR_TABLE)); + sparkSession.sql( + String.format( + "INSERT INTO %s.%s\n" + + "VALUES\n" + + "(1695115999911,'c8abbe79-8d89-47ea-b4ce-4d224bae5bfa','rider-J','driver-T',17.85,'chennai')", + DB_NAME, MOR_TABLE)); + + sparkSession.sql( + String.format( + "CREATE TABLE %s.%s (\n" + + " ts BIGINT,\n" + + " uuid STRING,\n" + + " rider STRING,\n" + + " driver STRING,\n" + + " fare DOUBLE,\n" + + " city STRING\n" + + ") USING HUDI\n" + + "PARTITIONED BY (city)", + DB_NAME, DATA_TABLE_NAME)); + + sparkSession.sql( + String.format( + "INSERT INTO %s.%s\n" + + "VALUES\n" + + "(1695159649087,'334e26e9-8355-45cc-97c6-c31daf0df330','rider-A','driver-K',19.10,'san_francisco'),\n" + + "(1695115999911,'c8abbe79-8d89-47ea-b4ce-4d224bae5bfa','rider-J','driver-T',17.85,'chennai')", + DB_NAME, DATA_TABLE_NAME)); + } +} diff --git a/catalogs/catalog-lakehouse-hudi/src/test/resources/log4j2.properties b/catalogs/catalog-lakehouse-hudi/src/test/resources/log4j2.properties new file mode 100644 index 00000000000..062b0a5c77c --- /dev/null +++ b/catalogs/catalog-lakehouse-hudi/src/test/resources/log4j2.properties @@ -0,0 +1,73 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# Set to debug or trace if log4j initialization is failing +status = info + +# Name of the configuration +name = ConsoleLogConfig + +# Console appender configuration +appender.console.type = Console +appender.console.name = consoleLogger +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p [%t] %c{1}:%L - %m%n + +# Log files location +property.logPath = ${sys:gravitino.log.path:-build/catalog-lakehouse-hudi-integration-test.log} + +# File appender configuration +appender.file.type = File +appender.file.name = fileLogger +appender.file.fileName = ${logPath} +appender.file.layout.type = PatternLayout +appender.file.layout.pattern = %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c - %m%n + +# Root logger level +rootLogger.level = info + +# Root logger referring to console and file appenders +rootLogger.appenderRef.stdout.ref = consoleLogger +rootLogger.appenderRef.file.ref = fileLogger + +# File appender configuration for testcontainers +appender.testcontainersFile.type = File +appender.testcontainersFile.name = testcontainersLogger +appender.testcontainersFile.fileName = build/testcontainers.log +appender.testcontainersFile.layout.type = PatternLayout +appender.testcontainersFile.layout.pattern = %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c - %m%n + +# Logger for testcontainers +logger.testcontainers.name = org.testcontainers +logger.testcontainers.level = debug +logger.testcontainers.additivity = false +logger.testcontainers.appenderRef.file.ref = testcontainersLogger + +logger.tc.name = tc +logger.tc.level = debug +logger.tc.additivity = false +logger.tc.appenderRef.file.ref = testcontainersLogger + +logger.docker.name = com.github.dockerjava +logger.docker.level = warn +logger.docker.additivity = false +logger.docker.appenderRef.file.ref = testcontainersLogger + +logger.http.name = com.github.dockerjava.zerodep.shaded.org.apache.hc.client5.http.wire +logger.http.level = off \ No newline at end of file diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergBaseIT.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergBaseIT.java index 4a829b3bc9c..1c60e04b4c9 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergBaseIT.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergBaseIT.java @@ -59,7 +59,7 @@ import org.apache.gravitino.iceberg.common.IcebergConfig; import org.apache.gravitino.iceberg.common.utils.IcebergCatalogUtil; import org.apache.gravitino.integration.test.container.ContainerSuite; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; @@ -94,7 +94,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -public abstract class CatalogIcebergBaseIT extends AbstractIT { +public abstract class CatalogIcebergBaseIT extends BaseIT { protected static final ContainerSuite containerSuite = ContainerSuite.getInstance(); protected String WAREHOUSE; @@ -125,7 +125,7 @@ public abstract class CatalogIcebergBaseIT extends AbstractIT { @BeforeAll public void startup() throws 
Exception { ignoreIcebergRestService = false; - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); containerSuite.startHiveContainer(); initIcebergCatalogProperties(); createMetalake(); @@ -144,12 +144,12 @@ public void stop() throws Exception { if (spark != null) { spark.close(); } - AbstractIT.stopIntegrationTest(); + super.stopIntegrationTest(); } } @AfterEach - private void resetSchema() { + public void resetSchema() { clearTableAndSchema(); createSchema(); } @@ -158,10 +158,10 @@ private void resetSchema() { // if startIntegrationTest() is auto invoked by Junit. So here we override // startIntegrationTest() to disable the auto invoke by junit. @BeforeAll - public static void startIntegrationTest() {} + public void startIntegrationTest() {} @AfterAll - public static void stopIntegrationTest() {} + public void stopIntegrationTest() {} protected abstract void initIcebergCatalogProperties(); diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergHiveIT.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergHiveIT.java index 5ec9de6dd98..a9ca1a1108c 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergHiveIT.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergHiveIT.java @@ -28,10 +28,8 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class CatalogIcebergHiveIT extends CatalogIcebergBaseIT { @Override diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergKerberosHiveIT.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergKerberosHiveIT.java index ebd8737f550..0f899463203 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergKerberosHiveIT.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergKerberosHiveIT.java @@ -43,7 +43,7 @@ import org.apache.gravitino.iceberg.common.IcebergConfig; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.TableChange; @@ -63,7 +63,7 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class CatalogIcebergKerberosHiveIT extends AbstractIT { +public class CatalogIcebergKerberosHiveIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(CatalogIcebergKerberosHiveIT.class); @@ -101,7 +101,7 @@ public class CatalogIcebergKerberosHiveIT extends AbstractIT { private static final String HIVE_COL_NAME3 = "col3"; @BeforeAll - public static void startIntegrationTest() { + public void startIntegrationTest() { 
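// Editorial note: this static-to-instance conversion recurs across the whole patch and only
// compiles because JUnit 5 allows non-static @BeforeAll/@AfterAll under the per-class test
// lifecycle. The working assumption (BaseIT itself is not shown in this diff) is that BaseIT
// declares @TestInstance(TestInstance.Lifecycle.PER_CLASS); that is why subclasses such as
// CatalogIcebergHiveIT above can drop their own @TestInstance annotations, and why an
// overriding startIntegrationTest() can register configs on instance state and then delegate
// via super.startIntegrationTest() instead of calling the old static AbstractIT entry point.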
containerSuite.startKerberosHiveContainer(); kerberosHiveContainer = containerSuite.getKerberosHiveContainer(); @@ -129,14 +129,14 @@ public static void startIntegrationTest() { ignoreIcebergRestService = false; // Start Gravitino server - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); } catch (Exception e) { throw new RuntimeException(e); } } @AfterAll - public static void stop() { + public void stop() { // Reset the UGI UserGroupInformation.reset(); @@ -145,7 +145,7 @@ public static void stop() { System.clearProperty("java.security.krb5.conf"); System.clearProperty("sun.security.krb5.debug"); - AbstractIT.client = null; + client = null; } private static void prepareKerberosConfig() throws Exception { @@ -201,14 +201,12 @@ private static void refreshKerberosConfig() { } } - private static void addKerberosConfig() { - AbstractIT.customConfigs.put(Configs.AUTHENTICATORS.getKey(), "kerberos"); - AbstractIT.customConfigs.put( - "gravitino.authenticator.kerberos.principal", GRAVITINO_SERVER_PRINCIPAL); - AbstractIT.customConfigs.put( - "gravitino.authenticator.kerberos.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); - AbstractIT.customConfigs.put(SDK_KERBEROS_KEYTAB_KEY, TMP_DIR + GRAVITINO_CLIENT_KEYTAB); - AbstractIT.customConfigs.put(SDK_KERBEROS_PRINCIPAL_KEY, GRAVITINO_CLIENT_PRINCIPAL); + private void addKerberosConfig() { + customConfigs.put(Configs.AUTHENTICATORS.getKey(), "kerberos"); + customConfigs.put("gravitino.authenticator.kerberos.principal", GRAVITINO_SERVER_PRINCIPAL); + customConfigs.put("gravitino.authenticator.kerberos.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); + customConfigs.put(SDK_KERBEROS_KEYTAB_KEY, TMP_DIR + GRAVITINO_CLIENT_KEYTAB); + customConfigs.put(SDK_KERBEROS_PRINCIPAL_KEY, GRAVITINO_CLIENT_PRINCIPAL); } @Test diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergRestIT.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergRestIT.java index 808da5cfc1f..f992821a8d4 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergRestIT.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergRestIT.java @@ -24,10 +24,8 @@ import org.apache.gravitino.integration.test.container.HiveContainer; import org.apache.gravitino.server.web.JettyServerConfig; import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.TestInstance; @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class CatalogIcebergRestIT extends CatalogIcebergBaseIT { @Override diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/TestMultipleJDBCLoad.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/TestMultipleJDBCLoad.java index 5f78ab57377..4b1edae827c 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/TestMultipleJDBCLoad.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/TestMultipleJDBCLoad.java @@ -34,7 +34,7 @@ import org.apache.gravitino.iceberg.common.IcebergConfig; import 
org.apache.gravitino.integration.test.container.MySQLContainer; import org.apache.gravitino.integration.test.container.PostgreSQLContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.TestDatabaseName; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.types.Types; @@ -45,7 +45,7 @@ import org.junit.jupiter.api.Test; @Tag("gravitino-docker-test") -public class TestMultipleJDBCLoad extends AbstractIT { +public class TestMultipleJDBCLoad extends BaseIT { private static final TestDatabaseName TEST_DB_NAME = TestDatabaseName.PG_TEST_ICEBERG_CATALOG_MULTIPLE_JDBC_LOAD; @@ -55,7 +55,7 @@ public class TestMultipleJDBCLoad extends AbstractIT { public static final String DEFAULT_POSTGRES_IMAGE = "postgres:13"; @BeforeAll - public static void startup() throws IOException { + public void startup() throws IOException { containerSuite.startMySQLContainer(TEST_DB_NAME); mySQLContainer = containerSuite.getMySQLContainer(); containerSuite.startPostgreSQLContainer(TEST_DB_NAME); diff --git a/catalogs/catalog-lakehouse-paimon/build.gradle.kts b/catalogs/catalog-lakehouse-paimon/build.gradle.kts index 8fee917458b..16a3382cfc5 100644 --- a/catalogs/catalog-lakehouse-paimon/build.gradle.kts +++ b/catalogs/catalog-lakehouse-paimon/build.gradle.kts @@ -122,6 +122,7 @@ dependencies { testImplementation(libs.junit.jupiter.api) testImplementation(libs.mysql.driver) testImplementation(libs.postgresql.driver) + testImplementation(libs.h2db) testImplementation(libs.bundles.log4j) testImplementation(libs.junit.jupiter.params) testImplementation(libs.paimon.oss) @@ -129,6 +130,7 @@ dependencies { testImplementation(libs.paimon.spark) testImplementation(libs.testcontainers) testImplementation(libs.testcontainers.localstack) + testImplementation(libs.testcontainers.mysql) testRuntimeOnly(libs.junit.jupiter.engine) } diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonCatalogBackend.java b/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonCatalogBackend.java index 32effe45256..355a79f5850 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonCatalogBackend.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonCatalogBackend.java @@ -20,5 +20,6 @@ /** The type of Apache Paimon catalog backend. 
*/ public enum PaimonCatalogBackend { - FILESYSTEM + FILESYSTEM, + JDBC } diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonCatalogPropertiesMetadata.java b/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonCatalogPropertiesMetadata.java index 0c6a87c0269..78a7eb1eb2a 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonCatalogPropertiesMetadata.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonCatalogPropertiesMetadata.java @@ -49,6 +49,10 @@ public class PaimonCatalogPropertiesMetadata extends BaseCatalogPropertiesMetada public static final String PAIMON_METASTORE = "metastore"; public static final String WAREHOUSE = "warehouse"; public static final String URI = "uri"; + public static final String GRAVITINO_JDBC_USER = "jdbc-user"; + public static final String PAIMON_JDBC_USER = "jdbc.user"; + public static final String GRAVITINO_JDBC_PASSWORD = "jdbc-password"; + public static final String PAIMON_JDBC_PASSWORD = "jdbc.password"; // S3 properties needed by Paimon public static final String S3_ENDPOINT = "s3.endpoint"; @@ -56,7 +60,17 @@ public class PaimonCatalogPropertiesMetada public static final String S3_SECRET_KEY = "s3.secret-key"; public static final Map<String, String> GRAVITINO_CONFIG_TO_PAIMON = - ImmutableMap.of(GRAVITINO_CATALOG_BACKEND, PAIMON_METASTORE, WAREHOUSE, WAREHOUSE, URI, URI); + ImmutableMap.of( GRAVITINO_CATALOG_BACKEND, PAIMON_METASTORE, WAREHOUSE, WAREHOUSE, URI, URI, GRAVITINO_JDBC_USER, PAIMON_JDBC_USER, GRAVITINO_JDBC_PASSWORD, PAIMON_JDBC_PASSWORD); private static final Map<String, PropertyEntry<?>> PROPERTIES_METADATA; public static final Map<String, String> KERBEROS_CONFIGURATION = ImmutableMap.of( @@ -105,7 +119,19 @@ public class PaimonCatalogPropertiesMetada "Paimon catalog uri config", false /* immutable */, null /* defaultValue */, - false /* hidden */)); + false /* hidden */), + stringOptionalPropertyEntry( + GRAVITINO_JDBC_USER, + "Gravitino Paimon catalog jdbc user", + false /* immutable */, + null /* defaultValue */, + true /* hidden */), + stringOptionalPropertyEntry( + GRAVITINO_JDBC_PASSWORD, + "Gravitino Paimon catalog jdbc password", + false /* immutable */, + null /* defaultValue */, + true /* hidden */)); HashMap<String, PropertyEntry<?>> result = Maps.newHashMap(); result.putAll(Maps.uniqueIndex(propertyEntries, PropertyEntry::getName)); result.putAll(KerberosConfig.KERBEROS_PROPERTY_ENTRIES); diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonConfig.java b/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonConfig.java index 71b6292d1f3..97adfeb51f5 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonConfig.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonConfig.java @@ -50,6 +50,22 @@ public class PaimonConfig extends Config { .stringConf() .create(); + public static final ConfigEntry<String> CATALOG_JDBC_USER = + new ConfigBuilder(PaimonCatalogPropertiesMetadata.GRAVITINO_JDBC_USER) + .doc("Paimon catalog jdbc user") + .version(ConfigConstants.VERSION_0_7_0) + .stringConf() + .checkValue(StringUtils::isNotBlank, ConfigConstants.NOT_BLANK_ERROR_MSG) + .create(); + + public
static final ConfigEntry<String> CATALOG_JDBC_PASSWORD = + new ConfigBuilder(PaimonCatalogPropertiesMetadata.GRAVITINO_JDBC_PASSWORD) + .doc("Paimon catalog jdbc password") + .version(ConfigConstants.VERSION_0_7_0) + .stringConf() + .checkValue(StringUtils::isNotBlank, ConfigConstants.NOT_BLANK_ERROR_MSG) + .create(); + public PaimonConfig() { super(false); } }
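Annotation: the new JDBC backend is wired up in three places above — the PaimonCatalogBackend enum value, the jdbc-user/jdbc-password mappings in GRAVITINO_CONFIG_TO_PAIMON, and the two ConfigEntry definitions. A minimal client-side sketch of what this enables, assuming the metalake-scoped GravitinoClient API exercised by the integration tests later in this patch; the endpoint, metalake name, JDBC URL, and credentials are placeholders:

import com.google.common.collect.ImmutableMap;
import java.util.Map;
import org.apache.gravitino.Catalog;
import org.apache.gravitino.client.GravitinoClient;

public class PaimonJdbcCatalogSketch {
  public static void main(String[] args) {
    // Placeholder endpoint and metalake; a real deployment supplies its own.
    GravitinoClient client =
        GravitinoClient.builder("http://localhost:8090").withMetalake("demo_metalake").build();

    // Keys on the left are the Gravitino-facing names defined in
    // PaimonCatalogPropertiesMetadata; jdbc-user/jdbc-password are rewritten to
    // Paimon's jdbc.user/jdbc.password through GRAVITINO_CONFIG_TO_PAIMON.
    Map<String, String> properties =
        ImmutableMap.of(
            "catalog-backend", "jdbc",
            "uri", "jdbc:mysql://mysql-host:3306/paimon_catalog",
            "warehouse", "hdfs://namenode:9000/user/hive/warehouse-catalog-paimon",
            "jdbc-user", "gravitino",
            "jdbc-password", "secret");

    Catalog catalog =
        client.createCatalog(
            "paimon_jdbc", Catalog.Type.RELATIONAL, "lakehouse-paimon", "Paimon over JDBC", properties);
    System.out.println("Created: " + catalog.name());
    client.close();
  }
}

CatalogPaimonJdbcIT further down in this diff exercises exactly this property set against a MySQL container.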
diff --git a/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonTablePropertiesMetadata.java b/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonTablePropertiesMetadata.java index 1c57e5b2cc5..671dd9d6682 100644 --- a/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonTablePropertiesMetadata.java +++ b/catalogs/catalog-lakehouse-paimon/src/main/java/org/apache/gravitino/catalog/lakehouse/paimon/PaimonTablePropertiesMetadata.java @@ -18,6 +18,7 @@ */ package org.apache.gravitino.catalog.lakehouse.paimon; +import static org.apache.gravitino.connector.PropertyEntry.stringImmutablePropertyEntry; import static org.apache.gravitino.connector.PropertyEntry.stringReservedPropertyEntry; import com.google.common.collect.ImmutableList; @@ -51,9 +52,12 @@ public class PaimonTablePropertiesMetadata extends BasePropertiesMetadata { stringReservedPropertyEntry(COMMENT, "The table comment", true), stringReservedPropertyEntry(OWNER, "The table owner", false), stringReservedPropertyEntry(BUCKET_KEY, "The table bucket key", false), - stringReservedPropertyEntry(MERGE_ENGINE, "The table merge engine", false), - stringReservedPropertyEntry(SEQUENCE_FIELD, "The table sequence field", false), - stringReservedPropertyEntry(ROWKIND_FIELD, "The table rowkind field", false), + stringImmutablePropertyEntry( MERGE_ENGINE, "The table merge engine", false, null, false, false), + stringImmutablePropertyEntry( SEQUENCE_FIELD, "The table sequence field", false, null, false, false), + stringImmutablePropertyEntry( ROWKIND_FIELD, "The table rowkind field", false, null, false, false), stringReservedPropertyEntry(PRIMARY_KEY, "The table primary key", false), stringReservedPropertyEntry(PARTITION, "The table partition", false)); PROPERTIES_METADATA = Maps.uniqueIndex(propertyEntries, PropertyEntry::getName); diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/TestGravitinoPaimonTable.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/TestGravitinoPaimonTable.java index 05e36219d99..466ecb3459d 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/TestGravitinoPaimonTable.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/TestGravitinoPaimonTable.java @@ -382,16 +382,41 @@ void testTableProperty() { try (PaimonCatalogOperations ops = new PaimonCatalogOperations()) { ops.initialize( initBackendCatalogProperties(), entity.toCatalogInfo(), PAIMON_PROPERTIES_METADATA); - Map<String, String> map = Maps.newHashMap(); - map.put(PaimonTablePropertiesMetadata.COMMENT, "test"); - map.put(PaimonTablePropertiesMetadata.OWNER, "test"); - map.put(PaimonTablePropertiesMetadata.BUCKET_KEY, "test"); - map.put(PaimonTablePropertiesMetadata.MERGE_ENGINE, "test"); - map.put(PaimonTablePropertiesMetadata.SEQUENCE_FIELD, "test"); - map.put(PaimonTablePropertiesMetadata.ROWKIND_FIELD, "test"); - map.put(PaimonTablePropertiesMetadata.PRIMARY_KEY, "test"); - map.put(PaimonTablePropertiesMetadata.PARTITION, "test"); - for (Map.Entry<String, String> entry : map.entrySet()) { + HashMap<String, String> reservedProps = + new HashMap<String, String>() { + { + put(PaimonTablePropertiesMetadata.COMMENT, "test"); + put(PaimonTablePropertiesMetadata.OWNER, "test"); + put(PaimonTablePropertiesMetadata.BUCKET_KEY, "test"); + put(PaimonTablePropertiesMetadata.PRIMARY_KEY, "test"); + put(PaimonTablePropertiesMetadata.PARTITION, "test"); + } + }; + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + PropertiesMetadataHelpers.validatePropertyForCreate( + paimonCatalog.tablePropertiesMetadata(), reservedProps)); + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + PropertiesMetadataHelpers.validatePropertyForAlter( + paimonCatalog.tablePropertiesMetadata(), reservedProps, Collections.emptyMap())); + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + PropertiesMetadataHelpers.validatePropertyForAlter( + paimonCatalog.tablePropertiesMetadata(), Collections.emptyMap(), reservedProps)); + + Map<String, String> immutableProps = + new HashMap<String, String>() { + { + put(PaimonTablePropertiesMetadata.MERGE_ENGINE, "test"); + put(PaimonTablePropertiesMetadata.SEQUENCE_FIELD, "test"); + put(PaimonTablePropertiesMetadata.ROWKIND_FIELD, "test"); + } + }; + for (Map.Entry<String, String> entry : immutableProps.entrySet()) { HashMap<String, String> properties = new HashMap<String, String>() { { @@ -399,26 +424,18 @@ void testTableProperty() { } }; PropertiesMetadata metadata = paimonCatalog.tablePropertiesMetadata(); + Assertions.assertDoesNotThrow( + () -> PropertiesMetadataHelpers.validatePropertyForCreate(metadata, properties)); Assertions.assertThrows( IllegalArgumentException.class, - () -> PropertiesMetadataHelpers.validatePropertyForCreate(metadata, properties)); - } - - map = Maps.newHashMap(); - map.put("key1", "val1"); - map.put("key2", "val2"); - for (Map.Entry<String, String> entry : map.entrySet()) { - HashMap<String, String> properties = - new HashMap<String, String>() { - { - put(entry.getKey(), entry.getValue()); - } - }; - PropertiesMetadata metadata = paimonCatalog.tablePropertiesMetadata(); - Assertions.assertDoesNotThrow( - () -> { - PropertiesMetadataHelpers.validatePropertyForCreate(metadata, properties); - }); + () -> + PropertiesMetadataHelpers.validatePropertyForAlter( + metadata, properties, Collections.emptyMap())); + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + PropertiesMetadataHelpers.validatePropertyForAlter( + metadata, Collections.emptyMap(), properties)); } } }
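Annotation: the test rewrite above tracks the semantic change in PaimonTablePropertiesMetadata — the merge engine, sequence field, and rowkind field move from reserved entries (never user-settable) to immutable entries (settable at create time, frozen afterwards). A compact sketch of that contract, reusing the validation helper from the test; the package paths follow the imports visible elsewhere in this patch, and the "merge-engine" key literal is illustrative:

import com.google.common.collect.ImmutableMap;
import java.util.Collections;
import java.util.Map;
import org.apache.gravitino.connector.PropertiesMetadata;
import org.apache.gravitino.connector.PropertiesMetadataHelpers;

class ImmutableTablePropertySketch {
  // In TestGravitinoPaimonTable this would be paimonCatalog.tablePropertiesMetadata().
  static void demonstrate(PropertiesMetadata metadata) {
    Map<String, String> props = ImmutableMap.of("merge-engine", "deduplicate");

    // Accepted at create time now that the entry is immutable rather than reserved.
    PropertiesMetadataHelpers.validatePropertyForCreate(metadata, props);

    try {
      // Setting the property on an existing table is still rejected...
      PropertiesMetadataHelpers.validatePropertyForAlter(metadata, props, Collections.emptyMap());
    } catch (IllegalArgumentException expected) {
      // ...and so is removing it:
    }
    try {
      PropertiesMetadataHelpers.validatePropertyForAlter(metadata, Collections.emptyMap(), props);
    } catch (IllegalArgumentException expected) {
      // immutable means create-only
    }
  }
}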
diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonBaseIT.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonBaseIT.java index b5ac224d973..45cd2464058 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonBaseIT.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonBaseIT.java @@ -51,8 +51,10 @@ import org.apache.gravitino.exceptions.SchemaAlreadyExistsException; import org.apache.gravitino.exceptions.TableAlreadyExistsException; import org.apache.gravitino.integration.test.container.ContainerSuite; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.container.MySQLContainer; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; +import org.apache.gravitino.integration.test.util.TestDatabaseName; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; import org.apache.gravitino.rel.TableCatalog; @@ -84,15 +86,26 @@ import org.junit.jupiter.api.Test; import org.junit.platform.commons.util.StringUtils; -public abstract class CatalogPaimonBaseIT extends AbstractIT { +public abstract class CatalogPaimonBaseIT extends BaseIT { protected static final ContainerSuite containerSuite = ContainerSuite.getInstance(); + protected static final TestDatabaseName TEST_DB_NAME = + TestDatabaseName.PG_TEST_ICEBERG_CATALOG_MULTIPLE_JDBC_LOAD; + protected static MySQLContainer mySQLContainer; protected String WAREHOUSE; protected String TYPE; + protected String URI; + protected String jdbcUser; + protected String jdbcPassword; + protected Catalog catalog; + protected org.apache.paimon.catalog.Catalog paimonCatalog; + protected String metalakeName = GravitinoITUtils.genRandomName("paimon_it_metalake"); + protected String catalogName = GravitinoITUtils.genRandomName("paimon_it_catalog"); + protected String schemaName = GravitinoITUtils.genRandomName("paimon_it_schema"); + protected static final String schema_comment = "schema_comment"; private static final String provider = "lakehouse-paimon"; private static final String catalog_comment = "catalog_comment"; - private static final String schema_comment = "schema_comment"; private static final String table_comment = "table_comment"; private static final String PAIMON_COL_NAME1 = "paimon_col_name1"; private static final String PAIMON_COL_NAME2 = "paimon_col_name2"; @@ -100,15 +113,9 @@ public abstract class CatalogPaimonBaseIT extends AbstractIT { private static final String PAIMON_COL_NAME4 = "paimon_col_name4"; private static final String PAIMON_COL_NAME5 = "paimon_col_name5"; private static final String alertTableName = "alert_table_name"; - private String metalakeName = GravitinoITUtils.genRandomName("paimon_it_metalake"); - private String catalogName = GravitinoITUtils.genRandomName("paimon_it_catalog"); - private String schemaName = GravitinoITUtils.genRandomName("paimon_it_schema"); - private String tableName = GravitinoITUtils.genRandomName("paimon_it_table"); private static String INSERT_BATCH_WITHOUT_PARTITION_TEMPLATE = "INSERT INTO paimon.%s VALUES %s"; private static final String SELECT_ALL_TEMPLATE = "SELECT * FROM paimon.%s"; private GravitinoMetalake metalake; - private Catalog catalog; - private org.apache.paimon.catalog.Catalog paimonCatalog; protected SparkSession spark; private Map<String, String> catalogProperties; @@ -163,9 +170,7 @@ void testPaimonSchemaOperations() throws DatabaseNotExistException { // load schema check. Schema schema = schemas.loadSchema(schemaIdent.name()); - // database properties is empty for Paimon FilesystemCatalog.
- Assertions.assertTrue(schema.properties().isEmpty()); - Assertions.assertTrue(paimonCatalog.loadDatabaseProperties(schemaIdent.name()).isEmpty()); + Assertions.assertEquals(testSchemaName, schema.name()); Map emptyMap = Collections.emptyMap(); Assertions.assertThrows( @@ -202,6 +207,7 @@ void testPaimonSchemaOperations() throws DatabaseNotExistException { @Test void testCreateTableWithNullComment() { + String tableName = GravitinoITUtils.genRandomName("paimon_table_with_null_comment"); Column[] columns = createColumns(); NameIdentifier tableIdentifier = NameIdentifier.of(schemaName, tableName); @@ -217,6 +223,8 @@ void testCreateTableWithNullComment() { @Test void testCreateAndLoadPaimonTable() throws org.apache.paimon.catalog.Catalog.TableNotExistException { + String tableName = GravitinoITUtils.genRandomName("create_and_load_paimon_table"); + // Create table from Gravitino API Column[] columns = createColumns(); @@ -301,6 +309,8 @@ void testCreateAndLoadPaimonTable() @Test void testCreateAndLoadPaimonPartitionedTable() throws org.apache.paimon.catalog.Catalog.TableNotExistException { + String tableName = GravitinoITUtils.genRandomName("create_and_load_paimon_partitioned_table"); + // Create table from Gravitino API Column[] columns = createColumns(); @@ -390,6 +400,8 @@ void testCreateAndLoadPaimonPartitionedTable() @Test void testCreateAndLoadPaimonPrimaryKeyTable() throws org.apache.paimon.catalog.Catalog.TableNotExistException { + String tableName = GravitinoITUtils.genRandomName("create_and_load_paimon_primary_key_table"); + // Create table from Gravitino API Column[] columns = createColumns(); ArrayList newColumns = new ArrayList<>(Arrays.asList(columns)); @@ -615,6 +627,8 @@ void testListAndDropPaimonTable() throws DatabaseNotExistException { @Test public void testAlterPaimonTable() { + String tableName = GravitinoITUtils.genRandomName("alter_paimon_table"); + Column[] columns = createColumns(); catalog .asTableCatalog() @@ -716,7 +730,7 @@ public void testAlterPaimonTable() { Column[] newColumns = new Column[] {col1, col2, col3}; NameIdentifier tableIdentifier = - NameIdentifier.of(schemaName, GravitinoITUtils.genRandomName("PaimonAlterTableIT")); + NameIdentifier.of(schemaName, GravitinoITUtils.genRandomName("new_alter_paimon_table")); catalog .asTableCatalog() .createTable( @@ -857,9 +871,9 @@ void testOperationDataOfPaimonTable() { } private void clearTableAndSchema() { - if (catalog.asSchemas().schemaExists(schemaName)) { - catalog.asSchemas().dropSchema(schemaName, true); - } + SupportsSchemas supportsSchema = catalog.asSchemas(); + Arrays.stream(supportsSchema.listSchemas()) + .forEach(schema -> supportsSchema.dropSchema(schema, true)); } private void createMetalake() { @@ -903,10 +917,8 @@ private void createSchema() { prop.put("key2", "val2"); Schema createdSchema = catalog.asSchemas().createSchema(ident.name(), schema_comment, prop); - // database properties is empty for Paimon FilesystemCatalog. 
Schema loadSchema = catalog.asSchemas().loadSchema(ident.name()); Assertions.assertEquals(createdSchema.name(), loadSchema.name()); - Assertions.assertTrue(loadSchema.properties().isEmpty()); } private Column[] createColumns() { diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonFileSystemIT.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonFileSystemIT.java index c4717b43eee..442fe331b84 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonFileSystemIT.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonFileSystemIT.java @@ -20,13 +20,18 @@ import com.google.common.collect.Maps; import java.util.Map; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.Schema; +import org.apache.gravitino.SupportsSchemas; import org.apache.gravitino.catalog.lakehouse.paimon.PaimonCatalogPropertiesMetadata; import org.apache.gravitino.integration.test.container.HiveContainer; +import org.apache.gravitino.integration.test.util.GravitinoITUtils; +import org.apache.paimon.catalog.Catalog; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.Test; @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class CatalogPaimonFileSystemIT extends CatalogPaimonBaseIT { @Override @@ -48,4 +53,22 @@ protected Map initPaimonCatalogProperties() { return catalogProperties; } + + @Test + void testPaimonSchemaProperties() throws Catalog.DatabaseNotExistException { + SupportsSchemas schemas = catalog.asSchemas(); + + // create schema. + String testSchemaName = GravitinoITUtils.genRandomName("test_schema_1"); + NameIdentifier schemaIdent = NameIdentifier.of(metalakeName, catalogName, testSchemaName); + Map schemaProperties = Maps.newHashMap(); + schemaProperties.put("key1", "val1"); + schemaProperties.put("key2", "val2"); + schemas.createSchema(schemaIdent.name(), schema_comment, schemaProperties); + + // load schema check, database properties is empty for Paimon FilesystemCatalog. + Schema schema = schemas.loadSchema(schemaIdent.name()); + Assertions.assertTrue(schema.properties().isEmpty()); + Assertions.assertTrue(paimonCatalog.loadDatabaseProperties(schemaIdent.name()).isEmpty()); + } } diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonJdbcIT.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonJdbcIT.java new file mode 100644 index 00000000000..d252b901d3b --- /dev/null +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonJdbcIT.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.catalog.lakehouse.paimon.integration.test; + +import com.google.common.collect.Maps; +import java.util.Map; +import org.apache.gravitino.catalog.lakehouse.paimon.PaimonCatalogPropertiesMetadata; +import org.apache.gravitino.integration.test.container.HiveContainer; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestInstance; + +@Tag("gravitino-docker-test") +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class CatalogPaimonJdbcIT extends CatalogPaimonBaseIT { + + @Override + protected Map initPaimonCatalogProperties() { + containerSuite.startMySQLContainer(TEST_DB_NAME); + mySQLContainer = containerSuite.getMySQLContainer(); + + Map catalogProperties = Maps.newHashMap(); + catalogProperties.put("key1", "val1"); + catalogProperties.put("key2", "val2"); + + TYPE = "jdbc"; + WAREHOUSE = + String.format( + "hdfs://%s:%d/user/hive/warehouse-catalog-paimon/", + containerSuite.getHiveContainer().getContainerIpAddress(), + HiveContainer.HDFS_DEFAULTFS_PORT); + URI = mySQLContainer.getJdbcUrl(TEST_DB_NAME); + jdbcUser = mySQLContainer.getUsername(); + jdbcPassword = mySQLContainer.getPassword(); + + catalogProperties.put(PaimonCatalogPropertiesMetadata.GRAVITINO_CATALOG_BACKEND, TYPE); + catalogProperties.put(PaimonCatalogPropertiesMetadata.WAREHOUSE, WAREHOUSE); + catalogProperties.put(PaimonCatalogPropertiesMetadata.URI, URI); + catalogProperties.put(PaimonCatalogPropertiesMetadata.GRAVITINO_JDBC_USER, jdbcUser); + catalogProperties.put(PaimonCatalogPropertiesMetadata.GRAVITINO_JDBC_PASSWORD, jdbcPassword); + + return catalogProperties; + } +} diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonKerberosFilesystemIT.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonKerberosFilesystemIT.java index f9f31ceadfe..cf00cf5ffdb 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonKerberosFilesystemIT.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonKerberosFilesystemIT.java @@ -40,7 +40,7 @@ import org.apache.gravitino.client.KerberosTokenProvider; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; @@ -56,13 +56,11 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -public class 
CatalogPaimonKerberosFilesystemIT extends AbstractIT { +public class CatalogPaimonKerberosFilesystemIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(CatalogPaimonKerberosFilesystemIT.class); @@ -99,7 +97,7 @@ public class CatalogPaimonKerberosFilesystemIT extends AbstractIT { private static final String FILESYSTEM_COL_NAME3 = "col3"; @BeforeAll - public static void startIntegrationTest() { + public void startIntegrationTest() { containerSuite.startKerberosHiveContainer(); kerberosHiveContainer = containerSuite.getKerberosHiveContainer(); @@ -122,14 +120,14 @@ public static void startIntegrationTest() { addKerberosConfig(); // Start Gravitino server - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); } catch (Exception e) { throw new RuntimeException(e); } } @AfterAll - public static void stop() { + public void stop() { // Reset the UGI UserGroupInformation.reset(); @@ -138,7 +136,7 @@ public static void stop() { System.clearProperty("java.security.krb5.conf"); System.clearProperty("sun.security.krb5.debug"); - AbstractIT.client = null; + client = null; } private static void prepareKerberosConfig() throws Exception { @@ -194,14 +192,12 @@ private static void refreshKerberosConfig() { } } - private static void addKerberosConfig() { - AbstractIT.customConfigs.put(Configs.AUTHENTICATORS.getKey(), "kerberos"); - AbstractIT.customConfigs.put( - "gravitino.authenticator.kerberos.principal", GRAVITINO_SERVER_PRINCIPAL); - AbstractIT.customConfigs.put( - "gravitino.authenticator.kerberos.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); - AbstractIT.customConfigs.put(SDK_KERBEROS_KEYTAB_KEY, TMP_DIR + GRAVITINO_CLIENT_KEYTAB); - AbstractIT.customConfigs.put(SDK_KERBEROS_PRINCIPAL_KEY, GRAVITINO_CLIENT_PRINCIPAL); + private void addKerberosConfig() { + customConfigs.put(Configs.AUTHENTICATORS.getKey(), "kerberos"); + customConfigs.put("gravitino.authenticator.kerberos.principal", GRAVITINO_SERVER_PRINCIPAL); + customConfigs.put("gravitino.authenticator.kerberos.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); + customConfigs.put(SDK_KERBEROS_KEYTAB_KEY, TMP_DIR + GRAVITINO_CLIENT_KEYTAB); + customConfigs.put(SDK_KERBEROS_PRINCIPAL_KEY, GRAVITINO_CLIENT_PRINCIPAL); } @Test diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonS3IT.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonS3IT.java index f3786b391ef..a435de4b573 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonS3IT.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonS3IT.java @@ -29,12 +29,10 @@ import org.apache.gravitino.storage.S3Properties; import org.apache.spark.sql.SparkSession; import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.TestInstance; import org.testcontainers.containers.Container; import org.testcontainers.shaded.org.awaitility.Awaitility; @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class CatalogPaimonS3IT extends CatalogPaimonBaseIT { private static final String S3_BUCKET_NAME = "my-test-bucket"; diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/utils/TestCatalogUtils.java 
b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/utils/TestCatalogUtils.java index 8072c165478..e8fe66551ba 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/utils/TestCatalogUtils.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/utils/TestCatalogUtils.java @@ -32,6 +32,7 @@ import org.apache.paimon.catalog.Catalog; import org.apache.paimon.catalog.FileSystemCatalog; import org.apache.paimon.factories.FactoryException; +import org.apache.paimon.jdbc.JdbcCatalog; import org.junit.jupiter.api.Test; /** Tests for {@link org.apache.gravitino.catalog.lakehouse.paimon.utils.CatalogUtils}. */ @@ -41,6 +42,8 @@ public class TestCatalogUtils { void testLoadCatalogBackend() throws Exception { // Test load FileSystemCatalog for filesystem metastore. assertCatalog(PaimonCatalogBackend.FILESYSTEM.name(), FileSystemCatalog.class); + // Test load JdbcCatalog for jdbc metastore. + assertCatalog(PaimonCatalogBackend.JDBC.name(), JdbcCatalog.class); // Test load catalog exception for other metastore. assertThrowsExactly(FactoryException.class, () -> assertCatalog("other", catalog -> {})); } @@ -63,7 +66,11 @@ private void assertCatalog(String metastore, Consumer consumer) throws System.getProperty("java.io.tmpdir"), "paimon_catalog_warehouse"), PaimonConfig.CATALOG_URI.getKey(), - "uri"))) + "jdbc:h2:mem:testdb", + PaimonConfig.CATALOG_JDBC_USER.getKey(), + "user", + PaimonConfig.CATALOG_JDBC_PASSWORD.getKey(), + "password"))) .getCatalog()) { consumer.accept(catalog); } diff --git a/catalogs/hive-metastore-common/build.gradle.kts b/catalogs/hive-metastore-common/build.gradle.kts index 0f023acb377..539c8291dd1 100644 --- a/catalogs/hive-metastore-common/build.gradle.kts +++ b/catalogs/hive-metastore-common/build.gradle.kts @@ -45,7 +45,7 @@ dependencies { exclude("com.google.code.findbugs", "sr305") exclude("com.tdunning", "json") exclude("com.zaxxer", "HikariCP") - exclude("io.dropwizard.metricss") + exclude("io.dropwizard.metrics") exclude("javax.transaction", "transaction-api") exclude("org.apache.ant") exclude("org.apache.avro") diff --git a/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java b/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java index 5ab440092ad..ca50af1b9ac 100644 --- a/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java +++ b/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java @@ -30,6 +30,7 @@ import org.apache.gravitino.exceptions.CatalogAlreadyExistsException; import org.apache.gravitino.exceptions.ConnectionFailedException; import org.apache.gravitino.exceptions.FilesetAlreadyExistsException; +import org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.exceptions.GroupAlreadyExistsException; import org.apache.gravitino.exceptions.IllegalPrivilegeException; import org.apache.gravitino.exceptions.MetalakeAlreadyExistsException; @@ -303,9 +304,13 @@ public void accept(ErrorResponse errorResponse) { case ErrorConstants.INTERNAL_ERROR_CODE: throw new RuntimeException(errorMessage); + case ErrorConstants.UNSUPPORTED_OPERATION_CODE: throw new UnsupportedOperationException(errorMessage); + case ErrorConstants.FORBIDDEN_CODE: + throw new ForbiddenException(errorMessage); + default: super.accept(errorResponse); } @@ -343,6 +348,9 @@ public void accept(ErrorResponse errorResponse) { case 
ErrorConstants.UNSUPPORTED_OPERATION_CODE: throw new UnsupportedOperationException(errorMessage); + case ErrorConstants.FORBIDDEN_CODE: + throw new ForbiddenException(errorMessage); + case ErrorConstants.INTERNAL_ERROR_CODE: throw new RuntimeException(errorMessage); @@ -380,6 +388,9 @@ public void accept(ErrorResponse errorResponse) { case ErrorConstants.ALREADY_EXISTS_CODE: throw new CatalogAlreadyExistsException(errorMessage); + case ErrorConstants.FORBIDDEN_CODE: + throw new ForbiddenException(errorMessage); + case ErrorConstants.INTERNAL_ERROR_CODE: throw new RuntimeException(errorMessage); @@ -495,6 +506,9 @@ public void accept(ErrorResponse errorResponse) { case ErrorConstants.ALREADY_EXISTS_CODE: throw new FilesetAlreadyExistsException(errorMessage); + case ErrorConstants.FORBIDDEN_CODE: + throw new ForbiddenException(errorMessage); + case ErrorConstants.INTERNAL_ERROR_CODE: throw new RuntimeException(errorMessage); @@ -530,6 +544,9 @@ public void accept(ErrorResponse errorResponse) { case ErrorConstants.ALREADY_EXISTS_CODE: throw new TopicAlreadyExistsException(errorMessage); + case ErrorConstants.FORBIDDEN_CODE: + throw new ForbiddenException(errorMessage); + case ErrorConstants.INTERNAL_ERROR_CODE: throw new RuntimeException(errorMessage); @@ -652,6 +669,9 @@ public void accept(ErrorResponse errorResponse) { case ErrorConstants.UNSUPPORTED_OPERATION_CODE: throw new UnsupportedOperationException(errorMessage); + case ErrorConstants.FORBIDDEN_CODE: + throw new ForbiddenException(errorMessage); + case ErrorConstants.INTERNAL_ERROR_CODE: throw new RuntimeException(errorMessage);
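Annotation: every handler above now maps ErrorConstants.FORBIDDEN_CODE to the new typed ForbiddenException, so Java callers can distinguish authorization failures from generic server errors. A minimal sketch of client-side handling, assuming the metalake-scoped GravitinoClient API; the endpoint, metalake, and catalog names are placeholders:

import java.util.Collections;
import org.apache.gravitino.Catalog;
import org.apache.gravitino.client.GravitinoClient;
import org.apache.gravitino.exceptions.ForbiddenException;

public class ForbiddenHandlingSketch {
  public static void main(String[] args) {
    GravitinoClient client =
        GravitinoClient.builder("http://localhost:8090").withMetalake("demo_metalake").build();
    try {
      client.createCatalog(
          "restricted_catalog", Catalog.Type.FILESET, "hadoop", "comment", Collections.emptyMap());
    } catch (ForbiddenException e) {
      // The server answered with FORBIDDEN_CODE, e.g. the calling user is not
      // registered in the metalake (the case CheckCurrentUserIT exercises below).
      System.err.println("Operation not permitted: " + e.getMessage());
    } finally {
      client.close();
    }
  }
}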
diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/AuditIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/AuditIT.java index a7ab09033d9..c438e0fca1e 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/AuditIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/AuditIT.java @@ -26,22 +26,22 @@ import org.apache.gravitino.MetalakeChange; import org.apache.gravitino.auth.AuthenticatorType; import org.apache.gravitino.client.GravitinoMetalake; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.utils.RandomNameUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -public class AuditIT extends AbstractIT { +public class AuditIT extends BaseIT { private static final String expectUser = System.getProperty("user.name"); @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { Map<String, String> configs = Maps.newHashMap(); configs.put(Configs.AUTHENTICATORS.getKey(), AuthenticatorType.SIMPLE.name().toLowerCase()); registerCustomConfigs(configs); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); } @Test diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java index 35783173b5c..609a1b4612f 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java @@ -31,7 +31,7 @@ import org.apache.gravitino.exceptions.CatalogAlreadyExistsException; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; @@ -43,7 +43,7 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class CatalogIT extends AbstractIT { +public class CatalogIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(CatalogIT.class); @@ -56,7 +56,7 @@ public class CatalogIT extends AbstractIT { private static String hmsUri; @BeforeAll - public static void startUp() { + public void startUp() { containerSuite.startHiveContainer(); hmsUri = String.format( @@ -70,7 +70,7 @@ } @AfterAll - public static void tearDown() { + public void tearDown() { client.dropMetalake(metalakeName); if (client != null) { diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java index 7f6ed7f236e..fb9efd2ca7f 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java @@ -33,7 +33,7 @@ import org.apache.gravitino.exceptions.IllegalNameIdentifierException; import org.apache.gravitino.exceptions.MetalakeAlreadyExistsException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.utils.RandomNameUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; @@ -43,7 +43,7 @@ import org.junit.jupiter.api.TestMethodOrder; @TestMethodOrder(MethodOrderer.OrderAnnotation.class) -public class MetalakeIT extends AbstractIT { +public class MetalakeIT extends BaseIT { public static String metalakeNameA = RandomNameUtils.genRandomName("metalakeA"); public static String metalakeNameB = RandomNameUtils.genRandomName("metalakeB"); diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java index 8d91c369d81..0cb1f9313bf 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java @@ -35,7 +35,7 @@ import org.apache.gravitino.exceptions.TagAlreadyExistsException; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; @@ -49,7 +49,7 @@ import org.junit.jupiter.api.Test; @org.junit.jupiter.api.Tag("gravitino-docker-test") -public class TagIT extends AbstractIT { +public class TagIT extends BaseIT { private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); @@ -61,7 +61,7 @@
public class TagIT extends AbstractIT { private static Table table; @BeforeAll - public static void setUp() { + public void setUp() { containerSuite.startHiveContainer(); String hmsUri = String.format( @@ -108,7 +108,7 @@ public static void setUp() { } @AfterAll - public static void tearDown() { + public void tearDown() { relationalCatalog.asTableCatalog().dropTable(NameIdentifier.of(schema.name(), table.name())); relationalCatalog.asSchemas().dropSchema(schema.name(), true); metalake.dropCatalog(relationalCatalog.name()); diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/VersionOperationsIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/VersionOperationsIT.java index 9a50d4dd293..61b1869cc4d 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/VersionOperationsIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/VersionOperationsIT.java @@ -19,12 +19,12 @@ package org.apache.gravitino.client.integration.test; import org.apache.gravitino.client.GravitinoVersion; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.ITUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -public class VersionOperationsIT extends AbstractIT { +public class VersionOperationsIT extends BaseIT { @Test public void testGetVersion() { GravitinoVersion gravitinoVersion = client.serverVersion(); diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlIT.java index e62cebcfdbd..685f465970b 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlIT.java @@ -49,24 +49,24 @@ import org.apache.gravitino.exceptions.NoSuchUserException; import org.apache.gravitino.exceptions.UserAlreadyExistsException; import org.apache.gravitino.file.Fileset; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.utils.RandomNameUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -public class AccessControlIT extends AbstractIT { +public class AccessControlIT extends BaseIT { private static String metalakeName = RandomNameUtils.genRandomName("metalake"); private static GravitinoMetalake metalake; @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { Map configs = Maps.newHashMap(); configs.put(Configs.ENABLE_AUTHORIZATION.getKey(), String.valueOf(true)); configs.put(Configs.SERVICE_ADMINS.getKey(), AuthConstants.ANONYMOUS_USER); registerCustomConfigs(configs); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); metalake = client.createMetalake(metalakeName, "metalake comment", Collections.emptyMap()); Catalog filesetCatalog = diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlNotAllowIT.java 
b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlNotAllowIT.java index 22986458ba6..a6817b27418 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlNotAllowIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlNotAllowIT.java @@ -26,12 +26,12 @@ import org.apache.gravitino.authorization.Privileges; import org.apache.gravitino.authorization.SecurableObjects; import org.apache.gravitino.client.GravitinoMetalake; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.utils.RandomNameUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -public class AccessControlNotAllowIT extends AbstractIT { +public class AccessControlNotAllowIT extends BaseIT { public static String metalakeTestName = RandomNameUtils.genRandomName("test"); diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/CheckCurrentUserIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/CheckCurrentUserIT.java new file mode 100644 index 00000000000..0b87eab8dbe --- /dev/null +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/CheckCurrentUserIT.java @@ -0,0 +1,273 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.gravitino.client.integration.test.authorization; + +import static org.apache.gravitino.server.GravitinoServer.WEBSERVER_CONF_PREFIX; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import java.util.Collections; +import java.util.Map; +import org.apache.gravitino.Catalog; +import org.apache.gravitino.Configs; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.auth.AuthConstants; +import org.apache.gravitino.authorization.Privileges; +import org.apache.gravitino.authorization.SecurableObject; +import org.apache.gravitino.authorization.SecurableObjects; +import org.apache.gravitino.client.GravitinoAdminClient; +import org.apache.gravitino.client.GravitinoMetalake; +import org.apache.gravitino.exceptions.ForbiddenException; +import org.apache.gravitino.file.Fileset; +import org.apache.gravitino.integration.test.container.ContainerSuite; +import org.apache.gravitino.integration.test.container.HiveContainer; +import org.apache.gravitino.integration.test.container.KafkaContainer; +import org.apache.gravitino.integration.test.util.BaseIT; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.types.Types; +import org.apache.gravitino.server.web.JettyServerConfig; +import org.apache.gravitino.utils.RandomNameUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Tag("gravitino-docker-test") +public class CheckCurrentUserIT extends BaseIT { + + private static final Logger LOG = LoggerFactory.getLogger(CheckCurrentUserIT.class); + private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); + private static String hmsUri; + private static String kafkaBootstrapServers; + private static GravitinoMetalake metalake; + private static GravitinoMetalake anotherMetalake; + private static String metalakeName = RandomNameUtils.genRandomName("metalake"); + + @BeforeAll + public void startIntegrationTest() throws Exception { + Map<String, String> configs = Maps.newHashMap(); + configs.put(Configs.ENABLE_AUTHORIZATION.getKey(), String.valueOf(true)); + configs.put(Configs.SERVICE_ADMINS.getKey(), AuthConstants.ANONYMOUS_USER); + registerCustomConfigs(configs); + super.startIntegrationTest(); + + containerSuite.startHiveContainer(); + hmsUri = + String.format( + "thrift://%s:%d", + containerSuite.getHiveContainer().getContainerIpAddress(), + HiveContainer.HIVE_METASTORE_PORT); + + containerSuite.startKafkaContainer(); + kafkaBootstrapServers = + String.format( + "%s:%d", + containerSuite.getKafkaContainer().getContainerIpAddress(), + KafkaContainer.DEFAULT_BROKER_PORT); + + JettyServerConfig jettyServerConfig = + JettyServerConfig.fromConfig(serverConfig, WEBSERVER_CONF_PREFIX); + + String uri = "http://" + jettyServerConfig.getHost() + ":" + jettyServerConfig.getHttpPort(); + System.setProperty("user.name", "test"); + GravitinoAdminClient anotherClient = GravitinoAdminClient.builder(uri).withSimpleAuth().build(); + + metalake = client.createMetalake(metalakeName, "metalake comment", Collections.emptyMap()); + anotherMetalake = anotherClient.loadMetalake(metalakeName); + } + + @AfterAll + public void tearDown() { + if (client != null) { + client.dropMetalake(metalakeName); + client.close(); + client = null; + } + + try { + closer.close(); + } catch (Exception e) { + LOG.error("Exception in closing CloseableGroup", e); + } + } + + @Test + public void testCreateTopic() { + String catalogName = RandomNameUtils.genRandomName("catalogA"); + + Map<String, String> properties = Maps.newHashMap(); + properties.put("bootstrap.servers", kafkaBootstrapServers); + + // Test to create catalog with a nonexistent user + Assertions.assertThrows( + ForbiddenException.class, + () -> + anotherMetalake.createCatalog( + catalogName, Catalog.Type.MESSAGING, "kafka", "comment", properties)); + + Catalog catalog = + metalake.createCatalog(catalogName, Catalog.Type.MESSAGING, "kafka", "comment", properties); + + // Test to create topic with a nonexistent user + metalake.addUser("test"); + Catalog anotherCatalog = anotherMetalake.loadCatalog(catalogName); + metalake.removeUser("test"); + NameIdentifier topicIdent = NameIdentifier.of("default", "topic"); + Assertions.assertThrows( + ForbiddenException.class, + () -> + anotherCatalog + .asTopicCatalog() + .createTopic(topicIdent, "comment", null, Collections.emptyMap())); + + Assertions.assertDoesNotThrow( + () -> + catalog + .asTopicCatalog() + .createTopic(topicIdent, "comment", null, Collections.emptyMap())); + catalog.asTopicCatalog().dropTopic(topicIdent); + + metalake.dropCatalog(catalogName); + } + + @Test + public void testCreateFileset() { + String catalogName = RandomNameUtils.genRandomName("catalog"); + // Test to create a fileset with a nonexistent user + Assertions.assertThrows( + ForbiddenException.class, + () -> + anotherMetalake.createCatalog( + catalogName, Catalog.Type.FILESET, "hadoop", "comment", Collections.emptyMap())); + + Catalog catalog = + metalake.createCatalog( + catalogName, Catalog.Type.FILESET, "hadoop", "comment", Collections.emptyMap()); + + // Test to create a schema with a nonexistent user + Catalog anotherCatalog = anotherMetalake.loadCatalog(catalogName); + Assertions.assertThrows( + ForbiddenException.class, + () -> anotherCatalog.asSchemas().createSchema("schema", "comment", Collections.emptyMap())); + + catalog.asSchemas().createSchema("schema", "comment", Collections.emptyMap()); + + // Test to create a fileset with a nonexistent user + NameIdentifier fileIdent = NameIdentifier.of("schema", "fileset"); + Assertions.assertThrows( + ForbiddenException.class, + () -> + anotherCatalog + .asFilesetCatalog() + .createFileset( + fileIdent, "comment", Fileset.Type.EXTERNAL, "tmp", Collections.emptyMap())); + + Assertions.assertDoesNotThrow( + () -> + catalog + .asFilesetCatalog() + .createFileset( + fileIdent, "comment", Fileset.Type.EXTERNAL, "tmp", Collections.emptyMap())); + + // Clean up + catalog.asFilesetCatalog().dropFileset(fileIdent); + catalog.asSchemas().dropSchema("schema", true); + metalake.dropCatalog(catalogName); + } + + @Test + public void testCreateRole() { + SecurableObject metalakeSecObject = + SecurableObjects.ofMetalake( + metalakeName, Lists.newArrayList(Privileges.CreateCatalog.allow())); + Assertions.assertThrows( + ForbiddenException.class, + () -> + anotherMetalake.createRole( + "role", Collections.emptyMap(), Lists.newArrayList(metalakeSecObject))); + + Assertions.assertDoesNotThrow( + () -> + metalake.createRole( + "role", Collections.emptyMap(), Lists.newArrayList(metalakeSecObject))); + metalake.deleteRole("role"); + } + + @Test + public void testCreateTable() { + String catalogName = RandomNameUtils.genRandomName("catalog"); + Map<String, String> properties = Maps.newHashMap(); + properties.put("metastore.uris", hmsUri); + + // Test to create catalog with a nonexistent user + Assertions.assertThrows( + ForbiddenException.class, + () -> + anotherMetalake.createCatalog( + catalogName, Catalog.Type.RELATIONAL, "hive", "catalog comment", properties)); + Catalog catalog = + metalake.createCatalog( + catalogName, Catalog.Type.RELATIONAL, "hive", "catalog comment", properties); + + // Test to create schema with a nonexistent user + Catalog anotherCatalog = anotherMetalake.loadCatalog(catalogName); + Assertions.assertThrows( + ForbiddenException.class, + () -> anotherCatalog.asSchemas().createSchema("schema", "comment", Collections.emptyMap())); + + catalog.asSchemas().createSchema("schema", "comment", Collections.emptyMap()); + + // Test to create table with a nonexistent user + NameIdentifier tableIdent = NameIdentifier.of("schema", "table"); + Assertions.assertThrows( + ForbiddenException.class, + () -> + anotherCatalog + .asTableCatalog() + .createTable( + tableIdent, + new Column[] { + Column.of("col1", Types.IntegerType.get()), + Column.of("col2", Types.StringType.get()) + }, + "comment", + Collections.emptyMap())); + + Assertions.assertDoesNotThrow( + () -> + catalog + .asTableCatalog() + .createTable( + tableIdent, + new Column[] { + Column.of("col1", Types.IntegerType.get()), + Column.of("col2", Types.StringType.get()) + }, + "comment", + Collections.emptyMap())); + + // Clean up + catalog.asTableCatalog().dropTable(tableIdent); + catalog.asSchemas().dropSchema("schema", true); + metalake.dropCatalog(catalogName); + } +} diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/KerberosOperationsIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/KerberosOperationsIT.java index 3ba68e1803c..2cf313925ce 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/KerberosOperationsIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/KerberosOperationsIT.java @@ -34,7 +34,7 @@ import org.apache.gravitino.client.GravitinoAdminClient; import org.apache.gravitino.client.GravitinoVersion; import org.apache.gravitino.client.KerberosTokenProvider; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.hadoop.minikdc.KerberosSecurityTestcase; import org.junit.jupiter.api.AfterAll; @@ -43,7 +43,7 @@ import org.junit.jupiter.api.Test; import org.testcontainers.shaded.com.google.common.util.concurrent.Uninterruptibles; -public class KerberosOperationsIT extends AbstractIT { +public class KerberosOperationsIT extends BaseIT { private static final KerberosSecurityTestcase kdc = new KerberosSecurityTestcase() { @@ -64,8 +64,12 @@ public void createMiniKdcConf() { private static final String clientPrincipal = "client@EXAMPLE.COM"; + public void setGravitinoAdminClient(GravitinoAdminClient client) { + this.client = client; + } + @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { kdc.startMiniKdc(); initKeyTab(); @@ -78,7 +82,7 @@ public static void startIntegrationTest() throws Exception { registerCustomConfigs(configs); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); client = GravitinoAdminClient.builder(serverUri) @@ -91,8 +95,8 @@ public static void startIntegrationTest() throws Exception { } @AfterAll - public static void stopIntegrationTest() throws IOException, InterruptedException { -
AbstractIT.stopIntegrationTest(); + public void stopIntegrationTest() throws IOException, InterruptedException { + super.stopIntegrationTest(); kdc.stopMiniKdc(); } diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/MultiAuthOperationsIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/MultiAuthOperationsIT.java index f0df92b6303..01628d95d19 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/MultiAuthOperationsIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/MultiAuthOperationsIT.java @@ -41,7 +41,7 @@ import org.apache.gravitino.client.GravitinoAdminClient; import org.apache.gravitino.client.GravitinoVersion; import org.apache.gravitino.client.KerberosTokenProvider; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.gravitino.integration.test.util.OAuthMockDataProvider; import org.apache.gravitino.server.authentication.OAuthConfig; @@ -55,7 +55,7 @@ import org.testcontainers.shaded.com.google.common.util.concurrent.Uninterruptibles; @Tag("gravitino-docker-test") -public class MultiAuthOperationsIT extends AbstractIT { +public class MultiAuthOperationsIT extends BaseIT { private static final KerberosSecurityTestcase kdc = new KerberosSecurityTestcase() { @Override @@ -73,7 +73,7 @@ public void createMiniKdcConf() { private static GravitinoAdminClient kerberosClient; @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { Map configs = Maps.newHashMap(); configs.put( Configs.AUTHENTICATORS.getKey(), @@ -86,7 +86,7 @@ public static void startIntegrationTest() throws Exception { configKerberos(configs); registerCustomConfigs(configs); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); oauthClient = GravitinoAdminClient.builder(serverUri) @@ -107,8 +107,8 @@ public static void startIntegrationTest() throws Exception { } @AfterAll - public static void stopIntegrationTest() throws IOException, InterruptedException { - AbstractIT.stopIntegrationTest(); + public void stopIntegrationTest() throws IOException, InterruptedException { + super.stopIntegrationTest(); kdc.stopMiniKdc(); } @@ -121,7 +121,10 @@ public void testMultiAuthenticationSuccess() throws Exception { final String gitCommitId = readGitCommitIdFromGitFile(); Assertions.assertEquals(gitCommitId, gravitinoVersion1.gitCommit()); } - new KerberosOperationsIT().testAuthenticationApi(); + + KerberosOperationsIT kerberosOperationsIT = new KerberosOperationsIT(); + kerberosOperationsIT.setGravitinoAdminClient(client); + kerberosOperationsIT.testAuthenticationApi(); GravitinoVersion gravitinoVersion2 = kerberosClient.serverVersion(); Assertions.assertEquals(System.getenv("PROJECT_VERSION"), gravitinoVersion2.version()); diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/OAuth2OperationsIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/OAuth2OperationsIT.java index d78586fa0a0..b8e769547d8 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/OAuth2OperationsIT.java +++ 
b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/OAuth2OperationsIT.java @@ -30,7 +30,7 @@ import org.apache.gravitino.Configs; import org.apache.gravitino.auth.AuthenticatorType; import org.apache.gravitino.client.GravitinoVersion; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.gravitino.integration.test.util.OAuthMockDataProvider; import org.apache.gravitino.server.authentication.OAuthConfig; @@ -38,7 +38,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -public class OAuth2OperationsIT extends AbstractIT { +public class OAuth2OperationsIT extends BaseIT { private static final KeyPair keyPair = Keys.keyPairFor(SignatureAlgorithm.RS256); private static final String publicKey = @@ -49,7 +49,7 @@ public class OAuth2OperationsIT extends AbstractIT { @SuppressWarnings("JavaUtilDate") @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { Map configs = Maps.newHashMap(); token = Jwts.builder() @@ -68,7 +68,7 @@ public static void startIntegrationTest() throws Exception { registerCustomConfigs(configs); OAuthMockDataProvider mockDataProvider = OAuthMockDataProvider.getInstance(); mockDataProvider.setTokenData(token.getBytes(StandardCharsets.UTF_8)); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); } @Test diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/OwnerIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/OwnerIT.java index ca9d96b8b10..877d891737f 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/OwnerIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/OwnerIT.java @@ -39,7 +39,7 @@ import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; import org.apache.gravitino.integration.test.container.KafkaContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.types.Types; import org.apache.gravitino.utils.RandomNameUtils; @@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class OwnerIT extends AbstractIT { +public class OwnerIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(OwnerIT.class); @@ -61,12 +61,12 @@ public class OwnerIT extends AbstractIT { private static String kafkaBootstrapServers; @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { Map configs = Maps.newHashMap(); configs.put(Configs.ENABLE_AUTHORIZATION.getKey(), String.valueOf(true)); configs.put(Configs.SERVICE_ADMINS.getKey(), AuthConstants.ANONYMOUS_USER); registerCustomConfigs(configs); - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); containerSuite.startHiveContainer(); hmsUri = @@ -84,7 +84,7 @@ public static void startIntegrationTest() throws Exception { } @AfterAll - public static void tearDown() { + public void tearDown() { if (client != null) { client.close(); client = null; diff --git 
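The AbstractIT-to-BaseIT migration running through the test diffs above follows one pattern: every @BeforeAll/@AfterAll hook drops its static modifier and chains to the parent with super.startIntegrationTest() or super.stopIntegrationTest() instead of the class-qualified AbstractIT call. Instance-level @BeforeAll/@AfterAll methods are only legal under JUnit 5's per-class test lifecycle, so the sketch below assumes BaseIT (not shown in this diff) carries @TestInstance(Lifecycle.PER_CLASS); the class names here are illustrative only, not the real BaseIT contents.

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.TestInstance;

    // Assumed shape of the new base class: per-class lifecycle makes the
    // @BeforeAll/@AfterAll hooks instance methods that subclasses can override.
    @TestInstance(TestInstance.Lifecycle.PER_CLASS)
    abstract class BaseLifecycleSketch {
      @BeforeAll
      public void startIntegrationTest() throws Exception {
        // start the shared Gravitino server once per test class
      }

      @AfterAll
      public void stopIntegrationTest() throws Exception {
        // stop the shared server once per test class
      }
    }

    class ExampleIT extends BaseLifecycleSketch {
      @BeforeAll
      @Override
      public void startIntegrationTest() throws Exception {
        // register test-specific configs first, then chain up, exactly as the
        // rewritten tests above do with super.startIntegrationTest()
        super.startIntegrationTest();
      }
    }

One side effect of losing the static state is visible in MultiAuthOperationsIT above: it now instantiates KerberosOperationsIT and hands over the shared client through the new setGravitinoAdminClient method rather than relying on a static field.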
a/clients/filesystem-hadoop3/build.gradle.kts b/clients/filesystem-hadoop3/build.gradle.kts index d7905cd3b35..cae1888185a 100644 --- a/clients/filesystem-hadoop3/build.gradle.kts +++ b/clients/filesystem-hadoop3/build.gradle.kts @@ -26,6 +26,10 @@ plugins { dependencies { compileOnly(project(":clients:client-java-runtime", configuration = "shadow")) compileOnly(libs.hadoop3.common) + implementation(project(":catalogs:catalog-hadoop")) { + exclude(group = "*") + } + implementation(libs.caffeine) testImplementation(project(":api")) @@ -35,6 +39,7 @@ dependencies { testImplementation(project(":server-common")) testImplementation(project(":clients:client-java")) testImplementation(project(":integration-test-common", "testArtifacts")) + testImplementation(project(":bundles:gcp-bundle")) testImplementation(libs.awaitility) testImplementation(libs.bundles.jetty) testImplementation(libs.bundles.jersey) @@ -71,6 +76,11 @@ tasks.build { dependsOn("javadoc") } +tasks.compileJava { + dependsOn(":catalogs:catalog-hadoop:jar") + dependsOn(":catalogs:catalog-hadoop:runtimeJars") +} + tasks.test { val skipITs = project.hasProperty("skipITs") if (skipITs) { diff --git a/clients/filesystem-hadoop3/src/main/java/org/apache/gravitino/filesystem/hadoop/GravitinoVirtualFileSystem.java b/clients/filesystem-hadoop3/src/main/java/org/apache/gravitino/filesystem/hadoop/GravitinoVirtualFileSystem.java index de0eb758edc..05e769667da 100644 --- a/clients/filesystem-hadoop3/src/main/java/org/apache/gravitino/filesystem/hadoop/GravitinoVirtualFileSystem.java +++ b/clients/filesystem-hadoop3/src/main/java/org/apache/gravitino/filesystem/hadoop/GravitinoVirtualFileSystem.java @@ -18,6 +18,9 @@ */ package org.apache.gravitino.filesystem.hadoop; +import static org.apache.gravitino.filesystem.hadoop.GravitinoVirtualFileSystemConfiguration.FS_FILESYSTEM_PROVIDERS; +import static org.apache.gravitino.filesystem.hadoop.GravitinoVirtualFileSystemConfiguration.GVFS_CONFIG_PREFIX; + import com.github.benmanes.caffeine.cache.Cache; import com.github.benmanes.caffeine.cache.Caffeine; import com.github.benmanes.caffeine.cache.Scheduler; @@ -41,6 +44,8 @@ import org.apache.gravitino.audit.FilesetAuditConstants; import org.apache.gravitino.audit.FilesetDataOperation; import org.apache.gravitino.audit.InternalClientType; +import org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider; +import org.apache.gravitino.catalog.hadoop.fs.FileSystemUtils; import org.apache.gravitino.client.DefaultOAuth2TokenProvider; import org.apache.gravitino.client.GravitinoClient; import org.apache.gravitino.client.KerberosTokenProvider; @@ -81,6 +86,8 @@ public class GravitinoVirtualFileSystem extends FileSystem { private static final Pattern IDENTIFIER_PATTERN = Pattern.compile("^(?:gvfs://fileset)?/([^/]+)/([^/]+)/([^/]+)(?>/[^/]+)*/?$"); private static final String SLASH = "/"; + private final Map fileSystemProvidersMap = Maps.newHashMap(); + private static final String GRAVITINO_BYPASS_PREFIX = "gravitino.bypass."; @Override public void initialize(URI name, Configuration configuration) throws IOException { @@ -125,6 +132,10 @@ public void initialize(URI name, Configuration configuration) throws IOException initializeClient(configuration); + // Register the default local and HDFS FileSystemProvider + String fileSystemProviders = configuration.get(FS_FILESYSTEM_PROVIDERS); + fileSystemProvidersMap.putAll(FileSystemUtils.getFileSystemProviders(fileSystemProviders)); + this.workingDirectory = new Path(name); this.uri = URI.create(name.getScheme() + 
"://" + name.getAuthority()); @@ -351,7 +362,6 @@ private FilesetContextPair getFilesetContext(Path virtualPath, FilesetDataOperat Preconditions.checkArgument( filesetCatalog != null, String.format("Loaded fileset catalog: %s is null.", catalogIdent)); - // set the thread local audit info Map contextMap = Maps.newHashMap(); contextMap.put( FilesetAuditConstants.HTTP_HEADER_INTERNAL_CLIENT_TYPE, @@ -364,7 +374,8 @@ private FilesetContextPair getFilesetContext(Path virtualPath, FilesetDataOperat filesetCatalog.getFileLocation( NameIdentifier.of(identifier.namespace().level(2), identifier.name()), subPath); - URI uri = new Path(actualFileLocation).toUri(); + Path filePath = new Path(actualFileLocation); + URI uri = filePath.toUri(); // we cache the fs for the same scheme, so we can reuse it String scheme = uri.getScheme(); Preconditions.checkArgument( @@ -374,7 +385,14 @@ private FilesetContextPair getFilesetContext(Path virtualPath, FilesetDataOperat scheme, str -> { try { - return FileSystem.newInstance(uri, getConf()); + Map maps = getConfigMap(getConf()); + FileSystemProvider provider = fileSystemProvidersMap.get(scheme); + if (provider == null) { + throw new GravitinoRuntimeException( + "Unsupported file system scheme: %s for %s.", + scheme, GravitinoVirtualFileSystemConfiguration.GVFS_SCHEME); + } + return provider.getFileSystem(filePath, maps); } catch (IOException ioe) { throw new GravitinoRuntimeException( "Exception occurs when create new FileSystem for actual uri: %s, msg: %s", @@ -385,6 +403,21 @@ private FilesetContextPair getFilesetContext(Path virtualPath, FilesetDataOperat return new FilesetContextPair(new Path(actualFileLocation), fs); } + private Map getConfigMap(Configuration configuration) { + Map maps = Maps.newHashMap(); + configuration.forEach( + entry -> { + String key = entry.getKey(); + if (key.startsWith(GRAVITINO_BYPASS_PREFIX)) { + maps.put(key.substring(GRAVITINO_BYPASS_PREFIX.length()), entry.getValue()); + } else if (!key.startsWith(GVFS_CONFIG_PREFIX)) { + maps.put(key, entry.getValue()); + } + }); + + return maps; + } + private String getSubPathFromVirtualPath(NameIdentifier identifier, String virtualPathString) { return virtualPathString.startsWith(GravitinoVirtualFileSystemConfiguration.GVFS_FILESET_PREFIX) ? virtualPathString.substring( diff --git a/clients/filesystem-hadoop3/src/main/java/org/apache/gravitino/filesystem/hadoop/GravitinoVirtualFileSystemConfiguration.java b/clients/filesystem-hadoop3/src/main/java/org/apache/gravitino/filesystem/hadoop/GravitinoVirtualFileSystemConfiguration.java index 8076c02c36a..95ce4df2a8f 100644 --- a/clients/filesystem-hadoop3/src/main/java/org/apache/gravitino/filesystem/hadoop/GravitinoVirtualFileSystemConfiguration.java +++ b/clients/filesystem-hadoop3/src/main/java/org/apache/gravitino/filesystem/hadoop/GravitinoVirtualFileSystemConfiguration.java @@ -18,11 +18,23 @@ */ package org.apache.gravitino.filesystem.hadoop; +import org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider; + /** Configuration class for Gravitino Virtual File System. */ -class GravitinoVirtualFileSystemConfiguration { +public class GravitinoVirtualFileSystemConfiguration { + + /** + * The prefix of the Gravitino fileset URI. The URI of the Gravitino fileset should start with + * this prefix. + */ public static final String GVFS_FILESET_PREFIX = "gvfs://fileset"; + + /** The scheme of the Gravitino Virtual File System. */ public static final String GVFS_SCHEME = "gvfs"; + /** The prefix of the Gravitino Virtual File System. 
*/ + public static final String GVFS_CONFIG_PREFIX = "fs.gvfs."; + /** The configuration key for the Gravitino server URI. */ public static final String FS_GRAVITINO_SERVER_URI_KEY = "fs.gravitino.server.uri"; @@ -32,8 +44,19 @@ class GravitinoVirtualFileSystemConfiguration { /** The configuration key for the Gravitino client auth type. */ public static final String FS_GRAVITINO_CLIENT_AUTH_TYPE_KEY = "fs.gravitino.client.authType"; + /** + * File system provider names configuration key. The value is a comma-separated list of file + * system provider names that are defined in the service loader. Users can plug in their own + * file systems by implementing the {@link FileSystemProvider} interface. + */ + public static final String FS_FILESYSTEM_PROVIDERS = "fs.gvfs.filesystem.providers"; + + /** The authentication type for simple authentication. */ public static final String SIMPLE_AUTH_TYPE = "simple"; + /** The authentication type for oauth2 authentication. */ public static final String OAUTH2_AUTH_TYPE = "oauth2"; + + /** The authentication type for kerberos authentication. */ public static final String KERBEROS_AUTH_TYPE = "kerberos"; // oauth2 /** The configuration key for the URI of the default OAuth server. */ @@ -64,6 +87,10 @@ class GravitinoVirtualFileSystemConfiguration { public static final String FS_GRAVITINO_FILESET_CACHE_MAX_CAPACITY_KEY = "fs.gravitino.fileset.cache.maxCapacity"; + /** + * The default value for the maximum capacity of the Gravitino fileset cache, which is + * 20. + */ public static final int FS_GRAVITINO_FILESET_CACHE_MAX_CAPACITY_DEFAULT = 20; /** @@ -73,6 +100,10 @@ class GravitinoVirtualFileSystemConfiguration { public static final String FS_GRAVITINO_FILESET_CACHE_EVICTION_MILLS_AFTER_ACCESS_KEY = "fs.gravitino.fileset.cache.evictionMillsAfterAccess"; + /** + * The default value for the eviction time of the Gravitino fileset cache, measured in + * milliseconds after access. + */ public static final long FS_GRAVITINO_FILESET_CACHE_EVICTION_MILLS_AFTER_ACCESS_DEFAULT = 1000L * 60 * 60; diff --git a/clients/filesystem-hadoop3/src/test/java/org/apache/gravitino/filesystem/hadoop/integration/test/GravitinoVirtualFileSystemGCSIT.java b/clients/filesystem-hadoop3/src/test/java/org/apache/gravitino/filesystem/hadoop/integration/test/GravitinoVirtualFileSystemGCSIT.java new file mode 100644 index 00000000000..a42d1c4b7b3 --- /dev/null +++ b/clients/filesystem-hadoop3/src/test/java/org/apache/gravitino/filesystem/hadoop/integration/test/GravitinoVirtualFileSystemGCSIT.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
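The FS_FILESYSTEM_PROVIDERS javadoc above says users can plug in their own file systems by implementing FileSystemProvider. The only shape this diff reveals is the provider.getFileSystem(filePath, maps) call in GravitinoVirtualFileSystem, so the following is a sketch against an assumed interface of that shape; the real org.apache.gravitino.catalog.hadoop.fs.FileSystemProvider (it lives in catalog-hadoop, which the new build.gradle.kts dependency above pulls in) may declare additional methods, such as the scheme it serves, and an implementation still has to be discoverable through the service loader mentioned in the javadoc.

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Assumed shape, inferred only from provider.getFileSystem(filePath, maps)
    // in the diff above; the real interface may differ.
    interface FileSystemProviderSketch {
      FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException;
    }

    // A trivial provider that hands the already-filtered config straight to Hadoop.
    class PassThroughProvider implements FileSystemProviderSketch {
      @Override
      public FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException {
        Configuration conf = new Configuration();
        config.forEach(conf::set);
        return FileSystem.newInstance(path.toUri(), conf);
      }
    }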
+ */ + +package org.apache.gravitino.filesystem.hadoop.integration.test; + +import static org.apache.gravitino.catalog.hadoop.HadoopCatalogPropertiesMetadata.FILESYSTEM_PROVIDERS; +import static org.apache.gravitino.filesystem.hadoop.GravitinoVirtualFileSystemConfiguration.FS_FILESYSTEM_PROVIDERS; + +import com.google.common.collect.Maps; +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import org.apache.gravitino.Catalog; +import org.apache.gravitino.integration.test.util.DownloaderUtils; +import org.apache.gravitino.integration.test.util.GravitinoITUtils; +import org.apache.gravitino.integration.test.util.ITUtils; +import org.apache.hadoop.conf.Configuration; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Disabled( + "Disabled because we don't have a real GCP account to test with. If you have a GCP account, " + + "please change the configuration (YOUR_KEY_FILE, YOUR_BUCKET) and enable this test.") +public class GravitinoVirtualFileSystemGCSIT extends GravitinoVirtualFileSystemIT { private static final Logger LOG = LoggerFactory.getLogger(GravitinoVirtualFileSystemGCSIT.class); + + public static final String BUCKET_NAME = "YOUR_BUCKET"; + public static final String SERVICE_ACCOUNT_FILE = "YOUR_KEY_FILE"; + + @BeforeAll + public void startIntegrationTest() { + // Do nothing + } + + @BeforeAll + public void startUp() throws Exception { + copyGCPJars(); + // The GCP bundle jar needs to be downloaded to the Gravitino server before it starts + super.startIntegrationTest(); + + // This value can be tuned by the user; please change it accordingly. + defaultBockSize = 64 * 1024 * 1024; + + metalakeName = GravitinoITUtils.genRandomName("gvfs_it_metalake"); + catalogName = GravitinoITUtils.genRandomName("catalog"); + schemaName = GravitinoITUtils.genRandomName("schema"); + + Assertions.assertFalse(client.metalakeExists(metalakeName)); + metalake = client.createMetalake(metalakeName, "metalake comment", Collections.emptyMap()); + Assertions.assertTrue(client.metalakeExists(metalakeName)); + + Map properties = Maps.newHashMap(); + properties.put(FILESYSTEM_PROVIDERS, "gcs"); + properties.put( + "gravitino.bypass.fs.gs.auth.service.account.json.keyfile", SERVICE_ACCOUNT_FILE); + + Catalog catalog = + metalake.createCatalog( + catalogName, Catalog.Type.FILESET, "hadoop", "catalog comment", properties); + Assertions.assertTrue(metalake.catalogExists(catalogName)); + + catalog.asSchemas().createSchema(schemaName, "schema comment", properties); + Assertions.assertTrue(catalog.asSchemas().schemaExists(schemaName)); + + conf.set("fs.gvfs.impl", "org.apache.gravitino.filesystem.hadoop.GravitinoVirtualFileSystem"); + conf.set("fs.AbstractFileSystem.gvfs.impl", "org.apache.gravitino.filesystem.hadoop.Gvfs"); + conf.set("fs.gvfs.impl.disable.cache", "true"); + conf.set("fs.gravitino.server.uri", serverUri); + conf.set("fs.gravitino.client.metalake", metalakeName); + + // Pass this configuration to the real file system + conf.set("gravitino.bypass.fs.gs.auth.service.account.enable", "true"); + conf.set("gravitino.bypass.fs.gs.auth.service.account.json.keyfile", SERVICE_ACCOUNT_FILE); + conf.set(FS_FILESYSTEM_PROVIDERS, "gcs"); + } + + @AfterAll + public void tearDown() throws IOException { + Catalog catalog = metalake.loadCatalog(catalogName); + catalog.asSchemas().dropSchema(schemaName, true); +
metalake.dropCatalog(catalogName); + client.dropMetalake(metalakeName); + + if (client != null) { + client.close(); + client = null; + } + + try { + closer.close(); + } catch (Exception e) { + LOG.error("Exception in closing CloseableGroup", e); + } + } + + /** + * Remove the `gravitino.bypass` prefix from the configuration and pass it to the real file system + * This method corresponds to the method org.apache.gravitino.filesystem.hadoop + * .GravitinoVirtualFileSystem#getConfigMap(Configuration) in the original code. + */ + protected Configuration convertGvfsConfigToRealFileSystemConfig(Configuration gvfsConf) { + Configuration gcsConf = new Configuration(); + gvfsConf.forEach( + entry -> { + gcsConf.set(entry.getKey().replace("gravitino.bypass.", ""), entry.getValue()); + }); + + return gcsConf; + } + + protected String genStorageLocation(String fileset) { + return String.format("gs://%s/%s", BUCKET_NAME, fileset); + } + + private static boolean isDeploy() { + String mode = + System.getProperty(ITUtils.TEST_MODE) == null + ? ITUtils.EMBEDDED_TEST_MODE + : System.getProperty(ITUtils.TEST_MODE); + + return Objects.equals(mode, ITUtils.DEPLOY_TEST_MODE); + } + + private void copyGCPJars() { + if (!isDeploy()) { + return; + } + + String gravitinoHome = System.getenv("GRAVITINO_HOME"); + String jarName = String.format("gravitino-gcp-bundle-%s.jar", System.getenv("PROJECT_VERSION")); + String gcsJars = + ITUtils.joinPath( + gravitinoHome, "..", "..", "bundles", "gcp-bundle", "build", "libs", jarName); + gcsJars = "file://" + gcsJars; + try { + if (!ITUtils.EMBEDDED_TEST_MODE.equals(testMode)) { + String hadoopLibDirs = ITUtils.joinPath(gravitinoHome, "catalogs", "hadoop", "libs"); + DownloaderUtils.downloadFile(gcsJars, hadoopLibDirs); + } + } catch (Exception e) { + throw new RuntimeException( + String.format("Failed to copy the gcs dependency jars: %s", gcsJars), e); + } + } + + @Disabled( + "GCS does not support append, java.io.IOException: The append operation is not supported") + public void testAppend() throws IOException {} +} diff --git a/clients/filesystem-hadoop3/src/test/java/org/apache/gravitino/filesystem/hadoop/integration/test/GravitinoVirtualFileSystemIT.java b/clients/filesystem-hadoop3/src/test/java/org/apache/gravitino/filesystem/hadoop/integration/test/GravitinoVirtualFileSystemIT.java index fae41264cbb..ced9a0b8b89 100644 --- a/clients/filesystem-hadoop3/src/test/java/org/apache/gravitino/filesystem/hadoop/integration/test/GravitinoVirtualFileSystemIT.java +++ b/clients/filesystem-hadoop3/src/test/java/org/apache/gravitino/filesystem/hadoop/integration/test/GravitinoVirtualFileSystemIT.java @@ -37,7 +37,7 @@ import org.apache.gravitino.file.Fileset; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -54,17 +54,18 @@ import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") -public class GravitinoVirtualFileSystemIT extends AbstractIT { +public class GravitinoVirtualFileSystemIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(GravitinoVirtualFileSystemIT.class); private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); - private static final String 
metalakeName = GravitinoITUtils.genRandomName("gvfs_it_metalake"); - private static final String catalogName = GravitinoITUtils.genRandomName("catalog"); - private static final String schemaName = GravitinoITUtils.genRandomName("schema"); - private static GravitinoMetalake metalake; - private static Configuration conf = new Configuration(); + protected String metalakeName = GravitinoITUtils.genRandomName("gvfs_it_metalake"); + protected String catalogName = GravitinoITUtils.genRandomName("catalog"); + protected String schemaName = GravitinoITUtils.genRandomName("schema"); + protected GravitinoMetalake metalake; + protected Configuration conf = new Configuration(); + protected int defaultBockSize = 128 * 1024 * 1024; @BeforeAll - public static void startUp() { + public void startUp() throws Exception { containerSuite.startHiveContainer(); Assertions.assertFalse(client.metalakeExists(metalakeName)); metalake = client.createMetalake(metalakeName, "metalake comment", Collections.emptyMap()); @@ -87,7 +88,7 @@ public static void startUp() { } @AfterAll - public static void tearDown() throws IOException { + public void tearDown() throws IOException { Catalog catalog = metalake.loadCatalog(catalogName); catalog.asSchemas().dropSchema(schemaName, true); metalake.dropCatalog(catalogName); @@ -112,10 +113,14 @@ public static void tearDown() throws IOException { } } + protected Configuration convertGvfsConfigToRealFileSystemConfig(Configuration gvfsConf) { + return gvfsConf; + } + @Test public void testCreate() throws IOException { // create fileset - String filesetName = "test_fileset_create"; + String filesetName = GravitinoITUtils.genRandomName("test_fileset_create"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -131,25 +136,28 @@ public void testCreate() throws IOException { // test gvfs create Path hdfsPath = new Path(storageLocation); - try (FileSystem fs = hdfsPath.getFileSystem(conf)) { + try (FileSystem fs = hdfsPath.getFileSystem(convertGvfsConfigToRealFileSystemConfig(conf))) { Assertions.assertTrue(fs.exists(hdfsPath)); Path gvfsPath = genGvfsPath(filesetName); try (FileSystem gvfs = gvfsPath.getFileSystem(conf)) { Assertions.assertTrue(gvfs.exists(gvfsPath)); String fileName = "test.txt"; Path createPath = new Path(gvfsPath + "/" + fileName); - gvfs.create(createPath); + // GCS need to close the stream to create the file manually. 
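+ // (Object stores materialize an object only when the output stream is closed, + // while HDFS exposes the path as soon as create() returns; closing here keeps + // the exists() assertions below true for both backends.)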
+ gvfs.create(createPath).close(); Assertions.assertTrue(gvfs.exists(createPath)); Assertions.assertTrue(gvfs.getFileStatus(createPath).isFile()); Assertions.assertTrue(fs.exists(new Path(storageLocation + "/" + fileName))); } } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } @Test public void testAppend() throws IOException { // create fileset - String filesetName = "test_fileset_append"; + String filesetName = GravitinoITUtils.genRandomName("test_fileset_append"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -165,7 +173,7 @@ public void testAppend() throws IOException { // test gvfs append Path hdfsPath = new Path(storageLocation); - try (FileSystem fs = hdfsPath.getFileSystem(conf)) { + try (FileSystem fs = hdfsPath.getFileSystem(convertGvfsConfigToRealFileSystemConfig(conf))) { Assertions.assertTrue(fs.exists(hdfsPath)); Path gvfsPath = genGvfsPath(filesetName); String fileName = "test.txt"; @@ -173,7 +181,7 @@ public void testAppend() throws IOException { try (FileSystem gvfs = gvfsPath.getFileSystem(conf)) { Assertions.assertTrue(gvfs.exists(gvfsPath)); - gvfs.create(appendPath); + gvfs.create(appendPath).close(); Assertions.assertTrue(gvfs.exists(appendPath)); Assertions.assertTrue(gvfs.getFileStatus(appendPath).isFile()); Assertions.assertTrue(fs.exists(new Path(storageLocation + "/" + fileName))); @@ -203,12 +211,14 @@ public void testAppend() throws IOException { } } } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } @Test public void testDelete() throws IOException { // create fileset - String filesetName = "test_fileset_delete"; + String filesetName = GravitinoITUtils.genRandomName("test_fileset_delete"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -224,14 +234,14 @@ public void testDelete() throws IOException { // test gvfs delete Path hdfsPath = new Path(storageLocation); - try (FileSystem fs = hdfsPath.getFileSystem(conf)) { + try (FileSystem fs = hdfsPath.getFileSystem(convertGvfsConfigToRealFileSystemConfig(conf))) { Assertions.assertTrue(fs.exists(hdfsPath)); Path gvfsPath = genGvfsPath(filesetName); String fileName = "test.txt"; Path deletePath = new Path(gvfsPath + "/" + fileName); try (FileSystem gvfs = gvfsPath.getFileSystem(conf)) { Assertions.assertTrue(gvfs.exists(gvfsPath)); - gvfs.create(deletePath); + gvfs.create(deletePath).close(); Assertions.assertTrue(gvfs.exists(deletePath)); Assertions.assertTrue(gvfs.getFileStatus(deletePath).isFile()); Assertions.assertTrue(fs.exists(new Path(storageLocation + "/" + fileName))); @@ -242,12 +252,14 @@ public void testDelete() throws IOException { Assertions.assertFalse(fs.exists(new Path(storageLocation + "/" + fileName))); } } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } @Test public void testGetStatus() throws IOException { // create fileset - String filesetName = "test_fileset_get_status"; + String filesetName = GravitinoITUtils.genRandomName("test_fileset_get_status"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -263,14 +275,14 @@ public void testGetStatus() throws IOException { // test gvfs get status Path hdfsPath = new Path(storageLocation); - try 
(FileSystem fs = hdfsPath.getFileSystem(conf)) { + try (FileSystem fs = hdfsPath.getFileSystem(convertGvfsConfigToRealFileSystemConfig(conf))) { Assertions.assertTrue(fs.exists(hdfsPath)); Path gvfsPath = genGvfsPath(filesetName); String fileName = "test.txt"; Path statusPath = new Path(gvfsPath + "/" + fileName); try (FileSystem gvfs = gvfsPath.getFileSystem(conf)) { Assertions.assertTrue(gvfs.exists(gvfsPath)); - gvfs.create(statusPath); + gvfs.create(statusPath).close(); Assertions.assertTrue(gvfs.exists(statusPath)); Assertions.assertTrue(gvfs.getFileStatus(statusPath).isFile()); Assertions.assertTrue(fs.exists(new Path(storageLocation + "/" + fileName))); @@ -284,12 +296,14 @@ public void testGetStatus() throws IOException { .replaceFirst(genGvfsPath(filesetName).toString(), storageLocation)); } } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } @Test public void testListStatus() throws IOException { // create fileset - String filesetName = "test_fileset_list_status"; + String filesetName = GravitinoITUtils.genRandomName("test_fileset_list_status"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -305,7 +319,7 @@ public void testListStatus() throws IOException { // test gvfs list status Path hdfsPath = new Path(storageLocation); - try (FileSystem fs = hdfsPath.getFileSystem(conf)) { + try (FileSystem fs = hdfsPath.getFileSystem(convertGvfsConfigToRealFileSystemConfig(conf))) { Assertions.assertTrue(fs.exists(hdfsPath)); Path gvfsPath = genGvfsPath(filesetName); for (int i = 0; i < 10; i++) { @@ -313,7 +327,7 @@ public void testListStatus() throws IOException { Path statusPath = new Path(gvfsPath + "/" + fileName); try (FileSystem gvfs = gvfsPath.getFileSystem(conf)) { Assertions.assertTrue(gvfs.exists(gvfsPath)); - gvfs.create(statusPath); + gvfs.create(statusPath).close(); Assertions.assertTrue(gvfs.exists(statusPath)); Assertions.assertTrue(gvfs.getFileStatus(statusPath).isFile()); Assertions.assertTrue(fs.exists(new Path(storageLocation + "/" + fileName))); @@ -340,12 +354,14 @@ public void testListStatus() throws IOException { } } } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } @Test public void testMkdirs() throws IOException { // create fileset - String filesetName = "test_fileset_mkdirs"; + String filesetName = GravitinoITUtils.genRandomName("test_fileset_mkdirs"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -361,7 +377,7 @@ public void testMkdirs() throws IOException { // test gvfs mkdirs Path hdfsPath = new Path(storageLocation); - try (FileSystem fs = hdfsPath.getFileSystem(conf)) { + try (FileSystem fs = hdfsPath.getFileSystem(convertGvfsConfigToRealFileSystemConfig(conf))) { Assertions.assertTrue(fs.exists(hdfsPath)); Path gvfsPath = genGvfsPath(filesetName); try (FileSystem gvfs = gvfsPath.getFileSystem(conf)) { @@ -374,12 +390,14 @@ public void testMkdirs() throws IOException { Assertions.assertTrue(fs.exists(new Path(storageLocation + "/" + dirName))); } } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } @Test public void testRename() throws IOException { // create fileset - String filesetName = "test_fileset_rename"; + String filesetName = GravitinoITUtils.genRandomName("test_fileset_rename"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, 
filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -395,7 +413,7 @@ public void testRename() throws IOException { // test gvfs rename Path hdfsPath = new Path(storageLocation); - try (FileSystem fs = hdfsPath.getFileSystem(conf)) { + try (FileSystem fs = hdfsPath.getFileSystem(convertGvfsConfigToRealFileSystemConfig(conf))) { Assertions.assertTrue(fs.exists(hdfsPath)); Path gvfsPath = genGvfsPath(filesetName); String srcName = "test_src"; @@ -420,11 +438,13 @@ public void testRename() throws IOException { Assertions.assertFalse(fs.exists(new Path(storageLocation + "/" + srcName))); } } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } @Test public void testGetDefaultReplications() throws IOException { - String filesetName = "test_get_default_replications"; + String filesetName = GravitinoITUtils.genRandomName("test_get_default_replications"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -441,11 +461,13 @@ public void testGetDefaultReplications() throws IOException { try (FileSystem gvfs = gvfsPath.getFileSystem(conf)) { assertEquals(3, gvfs.getDefaultReplication(gvfsPath)); } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } @Test public void testGetDefaultBlockSizes() throws IOException { - String filesetName = "test_get_default_block_sizes"; + String filesetName = GravitinoITUtils.genRandomName("test_get_default_block_sizes"); NameIdentifier filesetIdent = NameIdentifier.of(schemaName, filesetName); Catalog catalog = metalake.loadCatalog(catalogName); String storageLocation = genStorageLocation(filesetName); @@ -460,15 +482,17 @@ public void testGetDefaultBlockSizes() throws IOException { Assertions.assertTrue(catalog.asFilesetCatalog().filesetExists(filesetIdent)); Path gvfsPath = genGvfsPath(filesetName); try (FileSystem gvfs = gvfsPath.getFileSystem(conf)) { - assertEquals(128 * 1024 * 1024, gvfs.getDefaultBlockSize(gvfsPath)); + assertEquals(defaultBockSize, gvfs.getDefaultBlockSize(gvfsPath)); } + + catalog.asFilesetCatalog().dropFileset(filesetIdent); } - private String genStorageLocation(String fileset) { + protected String genStorageLocation(String fileset) { return String.format("%s/%s", baseHdfsPath(), fileset); } - private static String baseHdfsPath() { + private String baseHdfsPath() { return String.format( "hdfs://%s:%d/%s/%s", containerSuite.getHiveContainer().getContainerIpAddress(), @@ -477,7 +501,7 @@ private static String baseHdfsPath() { schemaName); } - private Path genGvfsPath(String fileset) { + protected Path genGvfsPath(String fileset) { return new Path(String.format("gvfs://fileset/%s/%s/%s", catalogName, schemaName, fileset)); } } diff --git a/common/src/main/java/org/apache/gravitino/dto/responses/ErrorConstants.java b/common/src/main/java/org/apache/gravitino/dto/responses/ErrorConstants.java index 8772a09d1ed..db799ac187c 100644 --- a/common/src/main/java/org/apache/gravitino/dto/responses/ErrorConstants.java +++ b/common/src/main/java/org/apache/gravitino/dto/responses/ErrorConstants.java @@ -45,6 +45,9 @@ public class ErrorConstants { /** Error codes for connect to catalog failed. */ public static final int CONNECTION_FAILED_CODE = 1007; + /** Error codes for forbidden operation. */ + public static final int FORBIDDEN_CODE = 1008; + /** Error codes for invalid state. 
*/ public static final int UNKNOWN_ERROR_CODE = 1100; diff --git a/common/src/main/java/org/apache/gravitino/dto/responses/ErrorResponse.java b/common/src/main/java/org/apache/gravitino/dto/responses/ErrorResponse.java index 86e619bc786..2c1e1e9ef84 100644 --- a/common/src/main/java/org/apache/gravitino/dto/responses/ErrorResponse.java +++ b/common/src/main/java/org/apache/gravitino/dto/responses/ErrorResponse.java @@ -28,6 +28,7 @@ import lombok.EqualsAndHashCode; import lombok.Getter; import org.apache.gravitino.exceptions.ConnectionFailedException; +import org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.exceptions.RESTException; /** Represents an error response. */ @@ -305,6 +306,21 @@ public static ErrorResponse unsupportedOperation(String message, Throwable throw getStackTrace(throwable)); } + /** + * Create a new forbidden operation error instance of {@link ErrorResponse}. + * + * @param message The message of the error. + * @param throwable The throwable that caused the error. + * @return The new instance. + */ + public static ErrorResponse forbidden(String message, Throwable throwable) { + return new ErrorResponse( + ErrorConstants.FORBIDDEN_CODE, + ForbiddenException.class.getSimpleName(), + message, + getStackTrace(throwable)); + } + private static List getStackTrace(Throwable throwable) { if (throwable == null) { return null; diff --git a/common/src/main/java/org/apache/gravitino/dto/util/DTOConverters.java b/common/src/main/java/org/apache/gravitino/dto/util/DTOConverters.java index adc1f5f03e0..d12b141ff3a 100644 --- a/common/src/main/java/org/apache/gravitino/dto/util/DTOConverters.java +++ b/common/src/main/java/org/apache/gravitino/dto/util/DTOConverters.java @@ -849,7 +849,7 @@ public static SortOrder fromDTO(SortOrderDTO sortOrderDTO) { */ public static SortOrder[] fromDTOs(SortOrderDTO[] sortOrderDTO) { if (ArrayUtils.isEmpty(sortOrderDTO)) { - return new SortOrder[0]; + return SortOrders.NONE; } return Arrays.stream(sortOrderDTO).map(DTOConverters::fromDTO).toArray(SortOrder[]::new); @@ -863,7 +863,7 @@ public static SortOrder[] fromDTOs(SortOrderDTO[] sortOrderDTO) { */ public static Transform[] fromDTOs(Partitioning[] partitioning) { if (ArrayUtils.isEmpty(partitioning)) { - return new Transform[0]; + return Transforms.EMPTY_TRANSFORM; } return Arrays.stream(partitioning).map(DTOConverters::fromDTO).toArray(Transform[]::new); } diff --git a/common/src/main/java/org/apache/gravitino/json/JsonUtils.java b/common/src/main/java/org/apache/gravitino/json/JsonUtils.java index ce80965b6a2..f0f79bd4545 100644 --- a/common/src/main/java/org/apache/gravitino/json/JsonUtils.java +++ b/common/src/main/java/org/apache/gravitino/json/JsonUtils.java @@ -33,6 +33,7 @@ import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.cfg.EnumFeature; import com.fasterxml.jackson.databind.json.JsonMapper; +import com.fasterxml.jackson.databind.module.SimpleModule; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.datatype.jdk8.Jdk8Module; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; @@ -291,7 +292,13 @@ private static class AnyFieldMapperHolder { .build() .setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY) .registerModule(new JavaTimeModule()) - .registerModule(new Jdk8Module()); + .registerModule(new Jdk8Module()) + .registerModule( + new SimpleModule() + .addDeserializer(Type.class, new TypeDeserializer()) + .addSerializer(Type.class, new TypeSerializer()) 
+ .addDeserializer(Expression.class, new ColumnDefaultValueDeserializer()) + .addSerializer(Expression.class, new ColumnDefaultValueSerializer())); } /** diff --git a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java index 51f5cae6217..81447abfd3b 100644 --- a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java +++ b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java @@ -36,10 +36,12 @@ import org.apache.gravitino.connector.authorization.AuthorizationPlugin; import org.apache.gravitino.dto.authorization.PrivilegeDTO; import org.apache.gravitino.dto.util.DTOConverters; +import org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.exceptions.IllegalPrivilegeException; import org.apache.gravitino.exceptions.NoSuchCatalogException; import org.apache.gravitino.exceptions.NoSuchMetadataObjectException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; +import org.apache.gravitino.exceptions.NoSuchUserException; import org.apache.gravitino.utils.MetadataObjectUtil; import org.apache.gravitino.utils.NameIdentifierUtil; import org.slf4j.Logger; @@ -81,6 +83,20 @@ static void checkMetalakeExists(String metalake) throws NoSuchMetalakeException } } + public static void checkCurrentUser(String metalake, String user) { + try { + AccessControlDispatcher dispatcher = GravitinoEnv.getInstance().accessControlDispatcher(); + // Only when we enable authorization, we need to check the current user + if (dispatcher != null) { + dispatcher.getUser(metalake, user); + } + } catch (NoSuchUserException nsu) { + throw new ForbiddenException( + "Current user %s doesn't exist in the metalake %s, you should add the user to the metalake first", + user, metalake); + } + } + public static NameIdentifier ofRole(String metalake, String role) { return NameIdentifier.of( metalake, Entity.SYSTEM_CATALOG_RESERVED_NAME, Entity.ROLE_SCHEMA_NAME, role); diff --git a/core/src/main/java/org/apache/gravitino/catalog/EntityCombinedTable.java b/core/src/main/java/org/apache/gravitino/catalog/EntityCombinedTable.java index 460835f51f5..4b0da1568b9 100644 --- a/core/src/main/java/org/apache/gravitino/catalog/EntityCombinedTable.java +++ b/core/src/main/java/org/apache/gravitino/catalog/EntityCombinedTable.java @@ -128,6 +128,14 @@ public boolean imported() { return imported; } + public Table tableFromCatalog() { + return table; + } + + public TableEntity tableFromGravitino() { + return tableEntity; + } + @Override public Audit auditInfo() { AuditInfo mergedAudit = diff --git a/core/src/main/java/org/apache/gravitino/catalog/TableOperationDispatcher.java b/core/src/main/java/org/apache/gravitino/catalog/TableOperationDispatcher.java index 4472859d92f..b54f0688715 100644 --- a/core/src/main/java/org/apache/gravitino/catalog/TableOperationDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/catalog/TableOperationDispatcher.java @@ -24,9 +24,16 @@ import static org.apache.gravitino.rel.expressions.transforms.Transforms.EMPTY_TRANSFORM; import static org.apache.gravitino.utils.NameIdentifierUtil.getCatalogIdentifier; +import com.google.common.base.Objects; +import com.google.common.collect.Lists; import java.time.Instant; import java.util.Arrays; +import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import 
org.apache.commons.lang3.tuple.Pair; import org.apache.gravitino.EntityStore; import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.NameIdentifier; @@ -41,6 +48,7 @@ import org.apache.gravitino.lock.LockType; import org.apache.gravitino.lock.TreeLockUtils; import org.apache.gravitino.meta.AuditInfo; +import org.apache.gravitino.meta.ColumnEntity; import org.apache.gravitino.meta.TableEntity; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; @@ -97,26 +105,31 @@ public NameIdentifier[] listTables(Namespace namespace) throws NoSuchSchemaExcep */ @Override public Table loadTable(NameIdentifier ident) throws NoSuchTableException { - EntityCombinedTable table = + EntityCombinedTable entityCombinedTable = TreeLockUtils.doWithTreeLock(ident, LockType.READ, () -> internalLoadTable(ident)); - if (!table.imported()) { + if (!entityCombinedTable.imported()) { // Load the schema to make sure the schema is imported. SchemaDispatcher schemaDispatcher = GravitinoEnv.getInstance().schemaDispatcher(); NameIdentifier schemaIdent = NameIdentifier.of(ident.namespace().levels()); schemaDispatcher.loadSchema(schemaIdent); // Import the table. - TreeLockUtils.doWithTreeLock( - schemaIdent, - LockType.WRITE, - () -> { - importTable(ident); - return null; - }); + entityCombinedTable = + TreeLockUtils.doWithTreeLock(schemaIdent, LockType.WRITE, () -> importTable(ident)); } - return table; + // Update the column entities in Gravitino store if the columns are different from the ones + // fetching from the underlying source. + TableEntity updatedEntity = updateColumnsIfNecessaryWhenLoad(ident, entityCombinedTable); + + return EntityCombinedTable.of(entityCombinedTable.tableFromCatalog(), updatedEntity) + .withHiddenPropertiesSet( + getHiddenPropertyNames( + getCatalogIdentifier(ident), + HasPropertyMetadata::tablePropertiesMetadata, + entityCombinedTable.tableFromCatalog().properties())) + .withImported(entityCombinedTable.imported()); } /** @@ -215,11 +228,15 @@ public Table alterTable(NameIdentifier ident, TableChange... changes) .map(c -> ((TableChange.RenameTable) c).getNewName()) .reduce((c1, c2) -> c2) .orElse(tableEntity.name()); + // Update the columns + Pair> columnsUpdateResult = + updateColumnsIfNecessary(alteredTable, tableEntity); return TableEntity.builder() .withId(tableEntity.id()) .withName(newName) .withNamespace(ident.namespace()) + .withColumns(columnsUpdateResult.getRight()) .withAuditInfo( AuditInfo.builder() .withCreator(tableEntity.auditInfo().creator()) @@ -328,11 +345,11 @@ public boolean purgeTable(NameIdentifier ident) throws UnsupportedOperationExcep : droppedFromCatalog; } - private void importTable(NameIdentifier identifier) { + private EntityCombinedTable importTable(NameIdentifier identifier) { EntityCombinedTable table = internalLoadTable(identifier); if (table.imported()) { - return; + return table; } StringIdentifier stringId = null; @@ -348,8 +365,8 @@ private void importTable(NameIdentifier identifier) { // of external system to correct it. LOG.warn( "The Table uid {} existed but still need to be imported, this could be happened " - + "when Table is renamed by external systems not controlled by Gravitino. In this case, " - + "we need to overwrite the stored entity to keep the consistency.", + + "when Table is renamed by external systems not controlled by Gravitino. 
In this " + + "case, we need to overwrite the stored entity to keep the consistency.", stringId); uid = stringId.id(); } else { @@ -357,18 +374,22 @@ private void importTable(NameIdentifier identifier) { uid = idGenerator.nextId(); } + AuditInfo audit = + AuditInfo.builder() + .withCreator(table.auditInfo().creator()) + .withCreateTime(table.auditInfo().createTime()) + .withLastModifier(table.auditInfo().lastModifier()) + .withLastModifiedTime(table.auditInfo().lastModifiedTime()) + .build(); + List columnEntityList = + toColumnEntities(table.tableFromCatalog().columns(), audit); TableEntity tableEntity = TableEntity.builder() .withId(uid) .withName(identifier.name()) .withNamespace(identifier.namespace()) - .withAuditInfo( - AuditInfo.builder() - .withCreator(table.auditInfo().creator()) - .withCreateTime(table.auditInfo().createTime()) - .withLastModifier(table.auditInfo().lastModifier()) - .withLastModifiedTime(table.auditInfo().lastModifiedTime()) - .build()) + .withColumns(columnEntityList) + .withAuditInfo(audit) .build(); try { store.put(tableEntity, true); @@ -376,6 +397,13 @@ private void importTable(NameIdentifier identifier) { LOG.error(FormattedErrorMessages.STORE_OP_FAILURE, "put", identifier, e); throw new RuntimeException("Fail to import the table entity to the store.", e); } + + return EntityCombinedTable.of(table.tableFromCatalog(), tableEntity) + .withHiddenPropertiesSet( + getHiddenPropertyNames( + getCatalogIdentifier(identifier), + HasPropertyMetadata::tablePropertiesMetadata, + table.tableFromCatalog().properties())); } private EntityCombinedTable internalLoadTable(NameIdentifier ident) { @@ -465,16 +493,23 @@ private Table internalCreateTable( NoSuchSchemaException.class, TableAlreadyExistsException.class); + AuditInfo audit = + AuditInfo.builder() + .withCreator(PrincipalUtils.getCurrentPrincipal().getName()) + .withCreateTime(Instant.now()) + .build(); + List columnEntityList = + Arrays.stream(columns) + .map(c -> ColumnEntity.toColumnEntity(c, idGenerator.nextId(), audit)) + .collect(Collectors.toList()); + TableEntity tableEntity = TableEntity.builder() .withId(uid) .withName(ident.name()) .withNamespace(ident.namespace()) - .withAuditInfo( - AuditInfo.builder() - .withCreator(PrincipalUtils.getCurrentPrincipal().getName()) - .withCreateTime(Instant.now()) - .build()) + .withColumns(columnEntityList) + .withAuditInfo(audit) .build(); try { @@ -492,4 +527,153 @@ private Table internalCreateTable( getHiddenPropertyNames( catalogIdent, HasPropertyMetadata::tablePropertiesMetadata, table.properties())); } + + private List toColumnEntities(Column[] columns, AuditInfo audit) { + return columns == null + ? 
Collections.emptyList() + : Arrays.stream(columns) + .map(c -> ColumnEntity.toColumnEntity(c, idGenerator.nextId(), audit)) + .collect(Collectors.toList()); + } + + private boolean isSameColumn(Column left, ColumnEntity right) { + return Objects.equal(left.name(), right.name()) + && Objects.equal(left.dataType(), right.dataType()) + && Objects.equal(left.comment(), right.comment()) + && left.nullable() == right.nullable() + && left.autoIncrement() == right.autoIncrement() + && Objects.equal(left.defaultValue(), right.defaultValue()); + } + + private Pair> updateColumnsIfNecessary( + Table tableFromCatalog, TableEntity tableFromGravitino) { + if (tableFromCatalog == null || tableFromGravitino == null) { + LOG.warn( + "Cannot update columns for table when altering because table or table entity is " + + "null"); + return Pair.of(false, Collections.emptyList()); + } + + Map columnsFromCatalogTable = + tableFromCatalog.columns() == null + ? Collections.emptyMap() + : Arrays.stream(tableFromCatalog.columns()) + .collect(Collectors.toMap(Column::name, Function.identity())); + Map columnsFromTableEntity = + tableFromGravitino.columns() == null + ? Collections.emptyMap() + : tableFromGravitino.columns().stream() + .collect(Collectors.toMap(ColumnEntity::name, Function.identity())); + + // Check if columns need to be updated in Gravitino store + List columnsToInsert = Lists.newArrayList(); + boolean columnsNeedsUpdate = false; + for (Map.Entry entry : columnsFromTableEntity.entrySet()) { + Column column = columnsFromCatalogTable.get(entry.getKey()); + if (column == null) { + LOG.debug( + "Column {} is not found in the table from underlying source, it will be removed" + + " from the table entity", + entry.getKey()); + columnsNeedsUpdate = true; + + } else if (!isSameColumn(column, entry.getValue())) { + // If the column need to be updated, we create a new ColumnEntity with the same id + LOG.debug( + "Column {} is found in the table from underlying source, but it is different " + + "from the one in the table entity, it will be updated", + entry.getKey()); + + ColumnEntity updatedColumnEntity = + ColumnEntity.builder() + .withId(entry.getValue().id()) + .withName(column.name()) + .withDataType(column.dataType()) + .withComment(column.comment()) + .withNullable(column.nullable()) + .withAutoIncrement(column.autoIncrement()) + .withDefaultValue(column.defaultValue()) + .withAuditInfo( + AuditInfo.builder() + .withCreator(entry.getValue().auditInfo().creator()) + .withCreateTime(entry.getValue().auditInfo().createTime()) + .withLastModifier(PrincipalUtils.getCurrentPrincipal().getName()) + .withLastModifiedTime(Instant.now()) + .build()) + .build(); + + columnsNeedsUpdate = true; + columnsToInsert.add(updatedColumnEntity); + + } else { + // If the column is the same, we keep the original ColumnEntity + columnsToInsert.add(entry.getValue()); + } + } + + // Check if there are new columns in the table from the underlying source + for (Map.Entry entry : columnsFromCatalogTable.entrySet()) { + if (!columnsFromTableEntity.containsKey(entry.getKey())) { + LOG.debug( + "Column {} is found in the table from underlying source but not in the table " + + "entity, it will be added to the table entity", + entry.getKey()); + ColumnEntity newColumnEntity = + ColumnEntity.toColumnEntity( + entry.getValue(), + idGenerator.nextId(), + AuditInfo.builder() + .withCreator(PrincipalUtils.getCurrentPrincipal().getName()) + .withCreateTime(Instant.now()) + .build()); + + columnsNeedsUpdate = true; + 
columnsToInsert.add(newColumnEntity); + } + } + + return Pair.of(columnsNeedsUpdate, columnsToInsert); + } + + private TableEntity updateColumnsIfNecessaryWhenLoad( + NameIdentifier tableIdent, EntityCombinedTable combinedTable) { + Pair> columnsUpdateResult = + updateColumnsIfNecessary( + combinedTable.tableFromCatalog(), combinedTable.tableFromGravitino()); + + // No need to update the columns + if (!columnsUpdateResult.getLeft()) { + return combinedTable.tableFromGravitino(); + } + + // Update the columns in the Gravitino store + return TreeLockUtils.doWithTreeLock( + tableIdent, + LockType.WRITE, + () -> + operateOnEntity( + tableIdent, + id -> + store.update( + id, + TableEntity.class, + TABLE, + entity -> + TableEntity.builder() + .withId(entity.id()) + .withName(entity.name()) + .withNamespace(entity.namespace()) + .withColumns(columnsUpdateResult.getRight()) + .withAuditInfo( + AuditInfo.builder() + .withCreator(entity.auditInfo().creator()) + .withCreateTime(entity.auditInfo().createTime()) + .withLastModifier( + PrincipalUtils.getCurrentPrincipal().getName()) + .withLastModifiedTime(Instant.now()) + .build()) + .build()), + "UPDATE", + combinedTable.tableFromGravitino().id())); + } } diff --git a/core/src/main/java/org/apache/gravitino/credential/CatalogCredentialContext.java b/core/src/main/java/org/apache/gravitino/credential/CatalogCredentialContext.java new file mode 100644 index 00000000000..a39dbba01bd --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/credential/CatalogCredentialContext.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.credential; + +import com.google.common.base.Preconditions; +import javax.validation.constraints.NotNull; + +/** CatalogCredentialContext is generated when user requesting catalog credentials. */ +public class CatalogCredentialContext implements CredentialContext { + @NotNull private final String userName; + + public CatalogCredentialContext(String userName) { + Preconditions.checkNotNull(userName, "User name should not be null"); + this.userName = userName; + } + + @Override + public String getUserName() { + return userName; + } +} diff --git a/core/src/main/java/org/apache/gravitino/credential/CredentialContext.java b/core/src/main/java/org/apache/gravitino/credential/CredentialContext.java new file mode 100644 index 00000000000..6e82efea0f1 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/credential/CredentialContext.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.credential; + +/** Contains credential context information to get credential from a credential provider. */ +public interface CredentialContext { + /** + * Providing the username. + * + * @return A string identifying user name. + */ + String getUserName(); +} diff --git a/core/src/main/java/org/apache/gravitino/credential/CredentialProvider.java b/core/src/main/java/org/apache/gravitino/credential/CredentialProvider.java new file mode 100644 index 00000000000..4056cd00b1b --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/credential/CredentialProvider.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.credential; + +import java.io.Closeable; +import java.util.Map; +import javax.annotation.Nullable; + +/** + * Interface for credential providers. + * + *
<p>
A credential provider is responsible for managing and retrieving credentials. + */ +public interface CredentialProvider extends Closeable { + /** + * Initializes the credential provider with catalog properties. + * + * @param properties catalog properties that can be used to configure the provider. The specific + * properties required vary by implementation. + */ + void initialize(Map properties); + + /** + * Returns the type of credential, it should be identical in Gravitino. + * + * @return A string identifying the type of credentials. + */ + String credentialType(); + + /** + * Obtains a credential based on the provided context information. + * + * @param context A context object providing necessary information for retrieving credentials. + * @return A Credential object containing the authentication information needed to access a system + * or resource. Null will be returned if no credential is available. + */ + @Nullable + Credential getCredential(CredentialContext context); +} diff --git a/core/src/main/java/org/apache/gravitino/credential/CredentialProviderFactory.java b/core/src/main/java/org/apache/gravitino/credential/CredentialProviderFactory.java new file mode 100644 index 00000000000..3833eeda9bf --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/credential/CredentialProviderFactory.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.gravitino.credential; + +import com.google.common.collect.Iterables; +import com.google.common.collect.Streams; +import java.util.List; +import java.util.Map; +import java.util.ServiceLoader; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class CredentialProviderFactory { + private static final Logger LOG = LoggerFactory.getLogger(CredentialProviderFactory.class); + + public static CredentialProvider create( + String credentialType, Map catalogProperties) { + Class providerClz = lookupCredentialProvider(credentialType); + try { + CredentialProvider provider = providerClz.getDeclaredConstructor().newInstance(); + provider.initialize(catalogProperties); + return provider; + } catch (Exception e) { + LOG.warn("Create credential provider failed, {}", credentialType, e); + throw new RuntimeException(e); + } + } + + private static Class lookupCredentialProvider( + String credentialType) { + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + ServiceLoader serviceLoader = + ServiceLoader.load(CredentialProvider.class, classLoader); + List> providers = + Streams.stream(serviceLoader.iterator()) + .filter( + credentialProvider -> + credentialType.equalsIgnoreCase(credentialProvider.credentialType())) + .map(CredentialProvider::getClass) + .collect(Collectors.toList()); + + if (providers.isEmpty()) { + throw new IllegalArgumentException("No credential provider found for: " + credentialType); + } else if (providers.size() > 1) { + throw new IllegalArgumentException( + "Multiple credential providers found for: " + credentialType); + } else { + return Iterables.getOnlyElement(providers); + } + } +} diff --git a/core/src/main/java/org/apache/gravitino/credential/PathBasedCredentialContext.java b/core/src/main/java/org/apache/gravitino/credential/PathBasedCredentialContext.java new file mode 100644 index 00000000000..03e7bbe0e31 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/credential/PathBasedCredentialContext.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.credential; + +import com.google.common.base.Preconditions; +import java.util.Set; +import javax.validation.constraints.NotNull; + +/** + * LocationContext is generated when user requesting resources associated with storage location like + * table, fileset, etc. 
+ */ +public class PathBasedCredentialContext implements CredentialContext { + + @NotNull private final Set writePaths; + @NotNull private final Set readPaths; + @NotNull private final String userName; + + public PathBasedCredentialContext( + String userName, Set writePaths, Set readPaths) { + Preconditions.checkNotNull(userName, "User name should not be null"); + Preconditions.checkNotNull(writePaths, "Write paths should not be null"); + Preconditions.checkNotNull(readPaths, "Read paths should not be null"); + this.userName = userName; + this.writePaths = writePaths; + this.readPaths = readPaths; + } + + @Override + public String getUserName() { + return userName; + } + + public Set getWritePaths() { + return writePaths; + } + + public Set getReadPaths() { + return readPaths; + } +} diff --git a/core/src/main/java/org/apache/gravitino/hook/AccessControlHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/AccessControlHookDispatcher.java index 36f75cfad15..125df0b2e18 100644 --- a/core/src/main/java/org/apache/gravitino/hook/AccessControlHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/AccessControlHookDispatcher.java @@ -145,6 +145,9 @@ public Role createRole( Map properties, List securableObjects) throws RoleAlreadyExistsException, NoSuchMetalakeException { + // Check whether the current user exists or not + AuthorizationUtils.checkCurrentUser(metalake, PrincipalUtils.getCurrentUserName()); + Role createdRole = dispatcher.createRole(metalake, role, properties, securableObjects); // Set the creator as the owner of role. diff --git a/core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java index 86b42dea30f..7a9989f4edf 100644 --- a/core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java @@ -25,6 +25,7 @@ import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; +import org.apache.gravitino.authorization.AuthorizationUtils; import org.apache.gravitino.authorization.FutureGrantManager; import org.apache.gravitino.authorization.Owner; import org.apache.gravitino.authorization.OwnerManager; @@ -71,6 +72,10 @@ public Catalog createCatalog( String comment, Map properties) throws NoSuchMetalakeException, CatalogAlreadyExistsException { + // Check whether the current user exists or not + AuthorizationUtils.checkCurrentUser( + ident.namespace().level(0), PrincipalUtils.getCurrentUserName()); + Catalog catalog = dispatcher.createCatalog(ident, type, provider, comment, properties); // Set the creator as the owner of the catalog. 
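A minimal sketch of a custom provider wired through the ServiceLoader lookup above, assuming it is registered in META-INF/services/org.apache.gravitino.credential.CredentialProvider. The class name, the "static-token" type string, and the "demo-token" property key are invented for illustration; only the Credential, CredentialProvider, and CredentialContext contracts come from this patch.

```java
package org.apache.gravitino.credential;

import com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.util.Map;

// Hypothetical provider: hands out a static token read from catalog properties.
public class StaticTokenCredentialProvider implements CredentialProvider {

  private String token;

  @Override
  public void initialize(Map<String, String> properties) {
    // "demo-token" is a made-up catalog property key for this sketch.
    this.token = properties.get("demo-token");
  }

  @Override
  public String credentialType() {
    // The type string that callers pass to CredentialProviderFactory.create(...).
    return "static-token";
  }

  @Override
  public Credential getCredential(CredentialContext context) {
    if (token == null) {
      return null; // the contract allows returning null when no credential is available
    }
    String user = context.getUserName();
    return new Credential() {
      @Override
      public String credentialType() {
        return "static-token"; // must match the provider's type
      }

      @Override
      public long expireTimeInMs() {
        return 0; // 0 means the credential does not expire
      }

      @Override
      public Map<String, String> credentialInfo() {
        return ImmutableMap.of("token", token, "user", user);
      }
    };
  }

  @Override
  public void close() throws IOException {}
}
```

CredentialProviderFactory.create("static-token", catalogProperties) would then locate and initialize this provider, and it fails loudly when zero or more than one provider on the classpath advertises the same type.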
diff --git a/core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java index e7780135588..e3272846df5 100644 --- a/core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java @@ -23,6 +23,7 @@ import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; +import org.apache.gravitino.authorization.AuthorizationUtils; import org.apache.gravitino.authorization.Owner; import org.apache.gravitino.authorization.OwnerManager; import org.apache.gravitino.catalog.FilesetDispatcher; @@ -64,6 +65,10 @@ public Fileset createFileset( String storageLocation, Map properties) throws NoSuchSchemaException, FilesetAlreadyExistsException { + // Check whether the current user exists or not + AuthorizationUtils.checkCurrentUser( + ident.namespace().level(0), PrincipalUtils.getCurrentUserName()); + Fileset fileset = dispatcher.createFileset(ident, comment, type, storageLocation, properties); // Set the creator as the owner of the fileset. diff --git a/core/src/main/java/org/apache/gravitino/hook/SchemaHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/SchemaHookDispatcher.java index d9bcf04175f..8b53f6e6d4f 100644 --- a/core/src/main/java/org/apache/gravitino/hook/SchemaHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/SchemaHookDispatcher.java @@ -25,6 +25,7 @@ import org.apache.gravitino.Namespace; import org.apache.gravitino.Schema; import org.apache.gravitino.SchemaChange; +import org.apache.gravitino.authorization.AuthorizationUtils; import org.apache.gravitino.authorization.Owner; import org.apache.gravitino.authorization.OwnerManager; import org.apache.gravitino.catalog.SchemaDispatcher; @@ -55,6 +56,10 @@ public NameIdentifier[] listSchemas(Namespace namespace) throws NoSuchCatalogExc @Override public Schema createSchema(NameIdentifier ident, String comment, Map properties) throws NoSuchCatalogException, SchemaAlreadyExistsException { + // Check whether the current user exists or not + AuthorizationUtils.checkCurrentUser( + ident.namespace().level(0), PrincipalUtils.getCurrentUserName()); + Schema schema = dispatcher.createSchema(ident, comment, properties); // Set the creator as the owner of the schema. 
diff --git a/core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java index 3a39f0a9d9f..c887746b48c 100644 --- a/core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java @@ -23,6 +23,7 @@ import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; +import org.apache.gravitino.authorization.AuthorizationUtils; import org.apache.gravitino.authorization.Owner; import org.apache.gravitino.authorization.OwnerManager; import org.apache.gravitino.catalog.TableDispatcher; @@ -72,6 +73,10 @@ public Table createTable( SortOrder[] sortOrders, Index[] indexes) throws NoSuchSchemaException, TableAlreadyExistsException { + // Check whether the current user exists or not + AuthorizationUtils.checkCurrentUser( + ident.namespace().level(0), PrincipalUtils.getCurrentUserName()); + Table table = dispatcher.createTable( ident, columns, comment, properties, partitions, distribution, sortOrders, indexes); diff --git a/core/src/main/java/org/apache/gravitino/hook/TopicHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/TopicHookDispatcher.java index c36e58e6f05..ad0ec8c58ab 100644 --- a/core/src/main/java/org/apache/gravitino/hook/TopicHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/TopicHookDispatcher.java @@ -23,6 +23,7 @@ import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; +import org.apache.gravitino.authorization.AuthorizationUtils; import org.apache.gravitino.authorization.Owner; import org.apache.gravitino.authorization.OwnerManager; import org.apache.gravitino.catalog.TopicDispatcher; @@ -61,6 +62,10 @@ public Topic loadTopic(NameIdentifier ident) throws NoSuchTopicException { public Topic createTopic( NameIdentifier ident, String comment, DataLayout dataLayout, Map properties) throws NoSuchSchemaException, TopicAlreadyExistsException { + // Check whether the current user exists or not + AuthorizationUtils.checkCurrentUser( + ident.namespace().level(0), PrincipalUtils.getCurrentUserName()); + Topic topic = dispatcher.createTopic(ident, comment, dataLayout, properties); // Set the creator as the owner of the topic. 
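All five hook dispatchers above add the same guard before a create operation. The patch does not include AuthorizationUtils.checkCurrentUser itself, so the following is only a plausible sketch of what such a guard does; the dispatcher lookup and the error wording are assumptions.

```java
import org.apache.gravitino.GravitinoEnv;
import org.apache.gravitino.exceptions.ForbiddenException;
import org.apache.gravitino.exceptions.NoSuchUserException;

// Sketch: fail fast with ForbiddenException when the requesting user has never
// been added to the metalake, instead of failing later inside the operation.
final class CheckCurrentUserSketch {

  static void checkCurrentUser(String metalake, String currentUser) {
    try {
      // A user that was never added to the metalake surfaces as NoSuchUserException.
      GravitinoEnv.getInstance().accessControlDispatcher().getUser(metalake, currentUser);
    } catch (NoSuchUserException e) {
      throw new ForbiddenException(
          e,
          "Current user %s doesn't exist in metalake %s, add the user first",
          currentUser,
          metalake);
    }
  }
}
```

This turns an unknown creator from a late storage inconsistency into an explicit ForbiddenException at the API boundary.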
diff --git a/core/src/main/java/org/apache/gravitino/listener/AsyncQueueListener.java b/core/src/main/java/org/apache/gravitino/listener/AsyncQueueListener.java index 641bc3eb532..18043964ddd 100644 --- a/core/src/main/java/org/apache/gravitino/listener/AsyncQueueListener.java +++ b/core/src/main/java/org/apache/gravitino/listener/AsyncQueueListener.java @@ -29,7 +29,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import org.apache.gravitino.listener.api.EventListenerPlugin; +import org.apache.gravitino.listener.api.event.BaseEvent; import org.apache.gravitino.listener.api.event.Event; +import org.apache.gravitino.listener.api.event.PreEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +46,7 @@ public class AsyncQueueListener implements EventListenerPlugin { private static final String NAME_PREFIX = "async-queue-listener-"; private final List eventListeners; - private final BlockingQueue queue; + private final BlockingQueue queue; private final Thread asyncProcessor; private final int dispatcherJoinSeconds; private final AtomicBoolean stopped = new AtomicBoolean(false); @@ -68,20 +70,13 @@ public AsyncQueueListener( } @Override - public void onPostEvent(Event event) { - if (stopped.get()) { - LOG.warn( - "{} drop event: {}, since AsyncQueueListener is stopped", - asyncQueueListenerName, - event.getClass().getSimpleName()); - return; - } - - if (queue.offer(event)) { - return; - } + public void onPreEvent(PreEvent event) { + enqueueEvent(event); + } - logDropEventsIfNecessary(); + @Override + public void onPostEvent(Event event) { + enqueueEvent(event); } @Override @@ -117,8 +112,14 @@ List getEventListeners() { private void processEvents() { while (!Thread.currentThread().isInterrupted()) { try { - Event event = queue.take(); - this.eventListeners.forEach(listener -> listener.onPostEvent(event)); + BaseEvent baseEvent = queue.take(); + if (baseEvent instanceof PreEvent) { + this.eventListeners.forEach(listener -> listener.onPreEvent((PreEvent) baseEvent)); + } else if (baseEvent instanceof Event) { + this.eventListeners.forEach(listener -> listener.onPostEvent((Event) baseEvent)); + } else { + LOG.warn("Unknown event type: {}", baseEvent.getClass().getSimpleName()); + } } catch (InterruptedException e) { LOG.warn("{} event dispatcher thread is interrupted.", asyncQueueListenerName); break; @@ -154,4 +155,20 @@ private void logDropEventsIfNecessary() { } } } + + private void enqueueEvent(BaseEvent baseEvent) { + if (stopped.get()) { + LOG.warn( + "{} drop event: {}, since AsyncQueueListener is stopped", + asyncQueueListenerName, + baseEvent.getClass().getSimpleName()); + return; + } + + if (queue.offer(baseEvent)) { + return; + } + + logDropEventsIfNecessary(); + } } diff --git a/core/src/main/java/org/apache/gravitino/listener/EventBus.java b/core/src/main/java/org/apache/gravitino/listener/EventBus.java index 6b18f9a5aca..d851dc29271 100644 --- a/core/src/main/java/org/apache/gravitino/listener/EventBus.java +++ b/core/src/main/java/org/apache/gravitino/listener/EventBus.java @@ -21,8 +21,11 @@ import com.google.common.annotations.VisibleForTesting; import java.util.List; +import org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.listener.api.EventListenerPlugin; +import org.apache.gravitino.listener.api.event.BaseEvent; import org.apache.gravitino.listener.api.event.Event; +import org.apache.gravitino.listener.api.event.PreEvent; /** * The {@code EventBus} class serves as a mechanism 
to dispatch events to registered listeners. It @@ -34,26 +37,32 @@ public class EventBus { // EventListenerPluginWrapper, // which are meant for synchronous event listening, or AsyncQueueListener, designed for // asynchronous event processing. - private final List postEventListeners; + private final List eventListeners; /** * Constructs an EventBus with a predefined list of event listeners. * - * @param postEventListeners A list of {@link EventListenerPlugin} instances that are to be - * registered with this EventBus for event dispatch. + * @param eventListeners A list of {@link EventListenerPlugin} instances that are to be registered + * with this EventBus for event dispatch. */ - public EventBus(List postEventListeners) { - this.postEventListeners = postEventListeners; + public EventBus(List eventListeners) { + this.eventListeners = eventListeners; } /** * Dispatches an event to all registered listeners. Each listener processes the event based on its * implementation, which could be either synchronous or asynchronous. * - * @param event The event to be dispatched to all registered listeners. + * @param baseEvent The event to be dispatched to all registered listeners. */ - public void dispatchEvent(Event event) { - postEventListeners.forEach(postEventListener -> postEventListener.onPostEvent(event)); + public void dispatchEvent(BaseEvent baseEvent) { + if (baseEvent instanceof PreEvent) { + dispatchPreEvent((PreEvent) baseEvent); + } else if (baseEvent instanceof Event) { + dispatchPostEvent((Event) baseEvent); + } else { + throw new RuntimeException("Unknown event type:" + baseEvent.getClass().getSimpleName()); + } } /** @@ -64,7 +73,15 @@ public void dispatchEvent(Event event) { * EventBus. */ @VisibleForTesting - List getPostEventListeners() { - return postEventListeners; + List getEventListeners() { + return eventListeners; + } + + private void dispatchPostEvent(Event postEvent) { + eventListeners.forEach(eventListener -> eventListener.onPostEvent(postEvent)); + } + + private void dispatchPreEvent(PreEvent preEvent) throws ForbiddenException { + eventListeners.forEach(eventListener -> eventListener.onPreEvent(preEvent)); } } diff --git a/core/src/main/java/org/apache/gravitino/listener/EventListenerPluginWrapper.java b/core/src/main/java/org/apache/gravitino/listener/EventListenerPluginWrapper.java index a1483396384..8e0a2ffbc45 100644 --- a/core/src/main/java/org/apache/gravitino/listener/EventListenerPluginWrapper.java +++ b/core/src/main/java/org/apache/gravitino/listener/EventListenerPluginWrapper.java @@ -21,8 +21,11 @@ import com.google.common.annotations.VisibleForTesting; import java.util.Map; +import org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.listener.api.EventListenerPlugin; +import org.apache.gravitino.listener.api.event.BaseEvent; import org.apache.gravitino.listener.api.event.Event; +import org.apache.gravitino.listener.api.event.PreEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -67,11 +70,27 @@ public void onPostEvent(Event event) { try { userEventListener.onPostEvent(event); } catch (Exception e) { - LOG.warn( - "Event listener {} process event {} failed,", - listenerName, - event.getClass().getSimpleName(), - e); + printExceptionInEventProcess(listenerName, event, e); + } + } + + @Override + public void onPreEvent(PreEvent preEvent) { + try { + userEventListener.onPreEvent(preEvent); + } catch (ForbiddenException e) { + if (Mode.SYNC.equals(mode())) { + LOG.warn( + "Event listener {} process pre event {} throws 
ForbiddenException, will skip the " + + "operation.", + listenerName, + preEvent.getClass().getSimpleName(), + e); + throw e; + } + printExceptionInEventProcess(listenerName, preEvent, e); + } catch (Exception e) { + printExceptionInEventProcess(listenerName, preEvent, e); } } @@ -79,4 +98,12 @@ public void onPostEvent(Event event) { EventListenerPlugin getUserEventListener() { return userEventListener; } + + private void printExceptionInEventProcess(String listenerName, BaseEvent baseEvent, Exception e) { + LOG.warn( + "Event listener {} process event {} failed,", + listenerName, + baseEvent.getClass().getSimpleName(), + e); + } } diff --git a/core/src/main/java/org/apache/gravitino/listener/api/EventListenerPlugin.java b/core/src/main/java/org/apache/gravitino/listener/api/EventListenerPlugin.java index 8a0b8d98286..06d5b444019 100644 --- a/core/src/main/java/org/apache/gravitino/listener/api/EventListenerPlugin.java +++ b/core/src/main/java/org/apache/gravitino/listener/api/EventListenerPlugin.java @@ -21,7 +21,9 @@ import java.util.Map; import org.apache.gravitino.annotation.DeveloperApi; +import org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.listener.api.event.Event; +import org.apache.gravitino.listener.api.event.PreEvent; /** * Defines an interface for event listeners that manage the lifecycle and state of a plugin, @@ -95,17 +97,29 @@ enum Mode { void stop() throws RuntimeException; /** - * Handles events generated after the completion of an operation. Implementers are responsible for - * processing these events, which may involve additional logic to respond to the operation - * outcomes. + * Handle post-events generated after the completion of an operation. * - *
<p>
This method provides a hook for post-operation event processing, allowing plugins to react - * or adapt based on the event details. + *
<p>
This method provides a hook for post-operation event processing; you cannot change the + * resource in the event. * - * @param event The event to be processed. - * @throws RuntimeException Indicates issues encountered during event processing. + * @param postEvent The post event to be processed. + * @throws RuntimeException Indicates issues encountered during event processing, this has no + * effect on the operation. */ - void onPostEvent(Event event) throws RuntimeException; + default void onPostEvent(Event postEvent) throws RuntimeException {} + + /** + * Handle pre-events generated before the operation. + + *
<p>
This method handles pre-operation events in SYNC or ASYNC mode, any changes to resources in + * the event will affect the subsequent operations. + * + * @param preEvent The pre event to be processed. + * @throws ForbiddenException The subsequent operation will be skipped if and only if the event + * listener throwing {@code org.apache.gravitino.exceptions.ForbiddenException} and the event + * listener is SYNC mode, the exception will be ignored and logged only in other conditions. + */ + default void onPreEvent(PreEvent preEvent) throws ForbiddenException {} /** * Specifies the default operational mode for event processing by the plugin. The default diff --git a/core/src/main/java/org/apache/gravitino/listener/api/event/BaseEvent.java b/core/src/main/java/org/apache/gravitino/listener/api/event/BaseEvent.java new file mode 100644 index 00000000000..973323a0591 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/listener/api/event/BaseEvent.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.listener.api.event; + +import javax.annotation.Nullable; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.annotation.DeveloperApi; + +/** + * The abstract base class for all events. It encapsulates common information such as the user who + * generated the event and the identifier for the resource associated with the event. Subclasses + * should provide specific details related to their individual event types. + */ +@DeveloperApi +public abstract class BaseEvent { + private final String user; + @Nullable private final NameIdentifier identifier; + private final long eventTime; + + /** + * Constructs an Event instance with the specified user and resource identifier details. + * + * @param user The user associated with this event. It provides context about who triggered the + * event. + * @param identifier The resource identifier associated with this event. This may refer to various + * types of resources such as a metalake, catalog, schema, or table, etc. + */ + protected BaseEvent(String user, NameIdentifier identifier) { + this.user = user; + this.identifier = identifier; + this.eventTime = System.currentTimeMillis(); + } + + /** + * Retrieves the user associated with this event. + * + * @return A string representing the user associated with this event. + */ + public String user() { + return user; + } + + /** + * Retrieves the resource identifier associated with this event. + * + *
<p>
For list operations within a namespace, the identifier is the identifier corresponds to that + * namespace. For metalake list operation, identifier is null. + * + * @return A NameIdentifier object that represents the resource, like a metalake, catalog, schema, + * table, etc., associated with the event. + */ + @Nullable + public NameIdentifier identifier() { + return identifier; + } + + /** + * Returns the timestamp when the event was created. + * + * @return The event creation time in milliseconds since epoch. + */ + public long eventTime() { + return eventTime; + } +} diff --git a/core/src/main/java/org/apache/gravitino/listener/api/event/Event.java b/core/src/main/java/org/apache/gravitino/listener/api/event/Event.java index 89e233b430e..7dba616d42b 100644 --- a/core/src/main/java/org/apache/gravitino/listener/api/event/Event.java +++ b/core/src/main/java/org/apache/gravitino/listener/api/event/Event.java @@ -19,64 +19,13 @@ package org.apache.gravitino.listener.api.event; -import javax.annotation.Nullable; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.annotation.DeveloperApi; -/** - * The abstract base class for all events. It encapsulates common information such as the user who - * generated the event and the identifier for the resource associated with the event. Subclasses - * should provide specific details related to their individual event types. - */ +/** Represents a post event. */ @DeveloperApi -public abstract class Event { - private final String user; - @Nullable private final NameIdentifier identifier; - private final long eventTime; - - /** - * Constructs an Event instance with the specified user and resource identifier details. - * - * @param user The user associated with this event. It provides context about who triggered the - * event. - * @param identifier The resource identifier associated with this event. This may refer to various - * types of resources such as a metalake, catalog, schema, or table, etc. - */ +public abstract class Event extends BaseEvent { protected Event(String user, NameIdentifier identifier) { - this.user = user; - this.identifier = identifier; - this.eventTime = System.currentTimeMillis(); - } - - /** - * Retrieves the user associated with this event. - * - * @return A string representing the user associated with this event. - */ - public String user() { - return user; - } - - /** - * Retrieves the resource identifier associated with this event. - * - *
<p>
For list operations within a namespace, the identifier is the identifier corresponds to that - * namespace. For metalake list operation, identifier is null. - * - * @return A NameIdentifier object that represents the resource, like a metalake, catalog, schema, - * table, etc., associated with the event. - */ - @Nullable - public NameIdentifier identifier() { - return identifier; - } - - /** - * Returns the timestamp when the event was created. - * - * @return The event creation time in milliseconds since epoch. - */ - public long eventTime() { - return eventTime; + super(user, identifier); } } diff --git a/core/src/main/java/org/apache/gravitino/listener/api/event/PreEvent.java b/core/src/main/java/org/apache/gravitino/listener/api/event/PreEvent.java new file mode 100644 index 00000000000..52e26aec346 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/listener/api/event/PreEvent.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.listener.api.event; + +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.annotation.DeveloperApi; + +/** Represents a pre event. 
*/ +@DeveloperApi +public abstract class PreEvent extends BaseEvent { + protected PreEvent(String user, NameIdentifier identifier) { + super(user, identifier); + } +} diff --git a/core/src/main/java/org/apache/gravitino/meta/ColumnEntity.java b/core/src/main/java/org/apache/gravitino/meta/ColumnEntity.java index eb56636178b..37904426013 100644 --- a/core/src/main/java/org/apache/gravitino/meta/ColumnEntity.java +++ b/core/src/main/java/org/apache/gravitino/meta/ColumnEntity.java @@ -28,6 +28,7 @@ import org.apache.gravitino.Auditable; import org.apache.gravitino.Entity; import org.apache.gravitino.Field; +import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.expressions.Expression; import org.apache.gravitino.rel.types.Type; @@ -149,6 +150,19 @@ public static Builder builder() { return new Builder(); } + public static ColumnEntity toColumnEntity(Column column, long uid, AuditInfo audit) { + return builder() + .withId(uid) + .withName(column.name()) + .withComment(column.comment()) + .withDataType(column.dataType()) + .withNullable(column.nullable()) + .withAutoIncrement(column.autoIncrement()) + .withDefaultValue(column.defaultValue()) + .withAuditInfo(audit) + .build(); + } + public static class Builder { private final ColumnEntity columnEntity; @@ -198,6 +212,11 @@ public Builder withAuditInfo(AuditInfo auditInfo) { public ColumnEntity build() { columnEntity.validate(); + + if (columnEntity.defaultValue == null) { + columnEntity.defaultValue = Column.DEFAULT_VALUE_NOT_SET; + } + return columnEntity; } } diff --git a/core/src/main/java/org/apache/gravitino/meta/TableEntity.java b/core/src/main/java/org/apache/gravitino/meta/TableEntity.java index e5a66277031..197eed6d33d 100644 --- a/core/src/main/java/org/apache/gravitino/meta/TableEntity.java +++ b/core/src/main/java/org/apache/gravitino/meta/TableEntity.java @@ -20,7 +20,8 @@ import com.google.common.base.Objects; import com.google.common.collect.Maps; -import java.util.Arrays; +import java.util.Collections; +import java.util.List; import java.util.Map; import lombok.ToString; import org.apache.gravitino.Auditable; @@ -38,7 +39,7 @@ public class TableEntity implements Entity, Auditable, HasIdentifier { public static final Field AUDIT_INFO = Field.required("audit_info", AuditInfo.class, "The audit details of the table"); public static final Field COLUMNS = - Field.optional("columns", ColumnEntity[].class, "The columns of the table"); + Field.optional("columns", List.class, "The columns of the table"); private Long id; @@ -48,7 +49,7 @@ public class TableEntity implements Entity, Auditable, HasIdentifier { private Namespace namespace; - private ColumnEntity[] columns; + private List columns; /** * Returns a map of the fields and their corresponding values for this table. 
@@ -116,7 +117,7 @@ public Namespace namespace() { return namespace; } - public ColumnEntity[] columns() { + public List columns() { return columns; } @@ -134,12 +135,12 @@ public boolean equals(Object o) { && Objects.equal(name, baseTable.name) && Objects.equal(namespace, baseTable.namespace) && Objects.equal(auditInfo, baseTable.auditInfo) - && Arrays.equals(columns, baseTable.columns); + && Objects.equal(columns, baseTable.columns); } @Override public int hashCode() { - return Objects.hashCode(id, name, auditInfo, Arrays.hashCode(columns)); + return Objects.hashCode(id, name, auditInfo, columns); } public static class Builder { @@ -170,13 +171,18 @@ public Builder withNamespace(Namespace namespace) { return this; } - public Builder withColumns(ColumnEntity[] columns) { + public Builder withColumns(List columns) { tableEntity.columns = columns; return this; } public TableEntity build() { tableEntity.validate(); + + if (tableEntity.columns == null) { + tableEntity.columns = Collections.emptyList(); + } + return tableEntity; } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/JDBCBackend.java b/core/src/main/java/org/apache/gravitino/storage/relational/JDBCBackend.java index 1ed13855572..5a6eb2e09a7 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/JDBCBackend.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/JDBCBackend.java @@ -57,6 +57,7 @@ import org.apache.gravitino.storage.relational.service.OwnerMetaService; import org.apache.gravitino.storage.relational.service.RoleMetaService; import org.apache.gravitino.storage.relational.service.SchemaMetaService; +import org.apache.gravitino.storage.relational.service.TableColumnMetaService; import org.apache.gravitino.storage.relational.service.TableMetaService; import org.apache.gravitino.storage.relational.service.TagMetaService; import org.apache.gravitino.storage.relational.service.TopicMetaService; @@ -292,6 +293,8 @@ public int hardDeleteLegacyData(Entity.EntityType entityType, long legacyTimelin .deleteTagMetasByLegacyTimeline( legacyTimeline, GARBAGE_COLLECTOR_SINGLE_DELETION_LIMIT); case COLUMN: + return TableColumnMetaService.getInstance() + .deleteColumnsByLegacyTimeline(legacyTimeline, GARBAGE_COLLECTOR_SINGLE_DELETION_LIMIT); case AUDIT: return 0; // TODO: Implement hard delete logic for these entity types. 
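Stepping back to the listener changes above: a SYNC-mode plugin can now veto an operation by throwing ForbiddenException from onPreEvent, because EventListenerPluginWrapper rethrows that exception for SYNC listeners and only logs every other failure. A hypothetical listener sketch follows; the class name and the blocked user are invented, and the init/start lifecycle signatures are assumed from the rest of the plugin API.

```java
import java.util.Map;
import org.apache.gravitino.exceptions.ForbiddenException;
import org.apache.gravitino.listener.api.EventListenerPlugin;
import org.apache.gravitino.listener.api.event.PreEvent;

// Hypothetical veto listener: rejects every operation issued by the "guest" user.
public class BlockGuestUserListener implements EventListenerPlugin {

  @Override
  public void init(Map<String, String> properties) throws RuntimeException {}

  @Override
  public void start() throws RuntimeException {}

  @Override
  public void stop() throws RuntimeException {}

  @Override
  public Mode mode() {
    // Only SYNC listeners can abort the operation; in ASYNC mode a
    // ForbiddenException is logged and the operation proceeds.
    return Mode.SYNC;
  }

  @Override
  public void onPreEvent(PreEvent preEvent) throws ForbiddenException {
    if ("guest".equals(preEvent.user())) {
      throw new ForbiddenException(
          "User %s is not allowed to run %s",
          preEvent.user(),
          preEvent.getClass().getSimpleName());
    }
  }
}
```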
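On the entity side, TableEntity now carries its columns as a List of ColumnEntity and the builder backfills an empty list, so a sketch of the new surface looks like this; the ids stand in for idGenerator.nextId(), the names are invented, and Column.of/Types are the standard org.apache.gravitino.rel helpers.

```java
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import org.apache.gravitino.Namespace;
import org.apache.gravitino.meta.AuditInfo;
import org.apache.gravitino.meta.ColumnEntity;
import org.apache.gravitino.meta.TableEntity;
import org.apache.gravitino.rel.Column;
import org.apache.gravitino.rel.types.Types;

final class TableEntityColumnsSketch {

  static TableEntity build() {
    AuditInfo audit =
        AuditInfo.builder().withCreator("demo-user").withCreateTime(Instant.now()).build();

    Column[] columns = {
      Column.of(
          "id", Types.LongType.get(), "primary key", false, true, Column.DEFAULT_VALUE_NOT_SET),
      Column.of(
          "name", Types.StringType.get(), "display name", true, false, Column.DEFAULT_VALUE_NOT_SET)
    };

    // Mirrors importTable/internalCreateTable: one ColumnEntity per column,
    // sharing the table's audit info; real ids come from idGenerator.nextId().
    long nextId = 100L;
    List<ColumnEntity> columnEntities = new ArrayList<>();
    for (Column column : columns) {
      columnEntities.add(ColumnEntity.toColumnEntity(column, nextId++, audit));
    }

    return TableEntity.builder()
        .withId(1L)
        .withName("demo_table")
        .withNamespace(Namespace.of("metalake", "catalog", "schema"))
        .withColumns(columnEntities)
        .withAuditInfo(audit)
        .build();
  }
}
```

The same list is what updateColumnsIfNecessaryWhenLoad rewrites when the underlying source drifts from the stored entity, which is why TableEntity's equals/hashCode now compare column lists instead of arrays.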
diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/database/H2Database.java b/core/src/main/java/org/apache/gravitino/storage/relational/database/H2Database.java index 4214a9baeda..c2c2bbf63cd 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/database/H2Database.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/database/H2Database.java @@ -58,7 +58,7 @@ public String startH2Database(Config config) { Statement statement = connection.createStatement()) { String sqlContent = FileUtils.readFileToString( - new File(gravitinoHome + "/scripts/h2/schema-0.6.0-h2.sql"), StandardCharsets.UTF_8); + new File(gravitinoHome + "/scripts/h2/schema-0.7.0-h2.sql"), StandardCharsets.UTF_8); statement.execute(sqlContent); } catch (Exception e) { diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnMapper.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnMapper.java new file mode 100644 index 00000000000..2214d8fd359 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnMapper.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.gravitino.storage.relational.mapper; + +import java.util.List; +import org.apache.gravitino.storage.relational.po.ColumnPO; +import org.apache.ibatis.annotations.DeleteProvider; +import org.apache.ibatis.annotations.InsertProvider; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.SelectProvider; +import org.apache.ibatis.annotations.UpdateProvider; + +public interface TableColumnMapper { + + String COLUMN_TABLE_NAME = "table_column_version_info"; + + @SelectProvider( + type = TableColumnSQLProviderFactory.class, + method = "listColumnPOsByTableIdAndVersion") + List listColumnPOsByTableIdAndVersion( + @Param("tableId") Long tableId, @Param("tableVersion") Long tableVersion); + + @InsertProvider(type = TableColumnSQLProviderFactory.class, method = "insertColumnPOs") + void insertColumnPOs(@Param("columnPOs") List columnPOs); + + @UpdateProvider(type = TableColumnSQLProviderFactory.class, method = "softDeleteColumnsByTableId") + Integer softDeleteColumnsByTableId(@Param("tableId") Long tableId); + + @UpdateProvider( + type = TableColumnSQLProviderFactory.class, + method = "softDeleteColumnsByMetalakeId") + Integer softDeleteColumnsByMetalakeId(@Param("metalakeId") Long metalakeId); + + @UpdateProvider( + type = TableColumnSQLProviderFactory.class, + method = "softDeleteColumnsByCatalogId") + Integer softDeleteColumnsByCatalogId(@Param("catalogId") Long catalogId); + + @UpdateProvider( + type = TableColumnSQLProviderFactory.class, + method = "softDeleteColumnsBySchemaId") + Integer softDeleteColumnsBySchemaId(@Param("schemaId") Long schemaId); + + @DeleteProvider( + type = TableColumnSQLProviderFactory.class, + method = "deleteColumnPOsByLegacyTimeline") + Integer deleteColumnPOsByLegacyTimeline( + @Param("legacyTimeline") Long legacyTimeline, @Param("limit") int limit); +} diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnSQLProviderFactory.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnSQLProviderFactory.java new file mode 100644 index 00000000000..f85cf72d837 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnSQLProviderFactory.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.gravitino.storage.relational.mapper; + +import com.google.common.collect.ImmutableMap; +import java.util.List; +import java.util.Map; +import org.apache.gravitino.storage.relational.JDBCBackend; +import org.apache.gravitino.storage.relational.mapper.provider.base.TableColumnBaseSQLProvider; +import org.apache.gravitino.storage.relational.mapper.provider.postgresql.TableColumnPostgreSQLProvider; +import org.apache.gravitino.storage.relational.po.ColumnPO; +import org.apache.gravitino.storage.relational.session.SqlSessionFactoryHelper; +import org.apache.ibatis.annotations.Param; + +public class TableColumnSQLProviderFactory { + + static class TableColumnH2Provider extends TableColumnBaseSQLProvider {} + + static class TableColumnMySQLProvider extends TableColumnBaseSQLProvider {} + + private static final Map + TABLE_COLUMN_SQL_PROVIDERS = + ImmutableMap.of( + JDBCBackend.JDBCBackendType.MYSQL, new TableColumnMySQLProvider(), + JDBCBackend.JDBCBackendType.H2, new TableColumnH2Provider(), + JDBCBackend.JDBCBackendType.POSTGRESQL, new TableColumnPostgreSQLProvider()); + + public static TableColumnBaseSQLProvider getProvider() { + String databaseId = + SqlSessionFactoryHelper.getInstance() + .getSqlSessionFactory() + .getConfiguration() + .getDatabaseId(); + JDBCBackend.JDBCBackendType jdbcBackendType = + JDBCBackend.JDBCBackendType.fromString(databaseId); + return TABLE_COLUMN_SQL_PROVIDERS.get(jdbcBackendType); + } + + public static String listColumnPOsByTableIdAndVersion( + @Param("tableId") Long tableId, @Param("tableVersion") Long tableVersion) { + return getProvider().listColumnPOsByTableIdAndVersion(tableId, tableVersion); + } + + public static String insertColumnPOs(@Param("columnPOs") List columnPOs) { + return getProvider().insertColumnPOs(columnPOs); + } + + public static String softDeleteColumnsByTableId(@Param("tableId") Long tableId) { + return getProvider().softDeleteColumnsByTableId(tableId); + } + + public static String deleteColumnPOsByLegacyTimeline( + @Param("legacyTimeline") Long legacyTimeline, @Param("limit") int limit) { + return getProvider().deleteColumnPOsByLegacyTimeline(legacyTimeline, limit); + } + + public static String softDeleteColumnsByMetalakeId(@Param("metalakeId") Long metalakeId) { + return getProvider().softDeleteColumnsByMetalakeId(metalakeId); + } + + public static String softDeleteColumnsByCatalogId(@Param("catalogId") Long catalogId) { + return getProvider().softDeleteColumnsByCatalogId(catalogId); + } + + public static String softDeleteColumnsBySchemaId(@Param("schemaId") Long schemaId) { + return getProvider().softDeleteColumnsBySchemaId(schemaId); + } +} diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TableColumnBaseSQLProvider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TableColumnBaseSQLProvider.java new file mode 100644 index 00000000000..0af9889ba6a --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TableColumnBaseSQLProvider.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.storage.relational.mapper.provider.base; + +import java.util.List; +import org.apache.gravitino.storage.relational.mapper.TableColumnMapper; +import org.apache.gravitino.storage.relational.po.ColumnPO; +import org.apache.ibatis.annotations.Param; + +public class TableColumnBaseSQLProvider { + + public String listColumnPOsByTableIdAndVersion( + @Param("tableId") Long tableId, @Param("tableVersion") Long tableVersion) { + return "SELECT t1.column_id AS columnId, t1.column_name AS columnName," + + " t1.metalake_id AS metalakeId, t1.catalog_id AS catalogId," + + " t1.schema_id AS schemaId, t1.table_id AS tableId," + + " t1.table_version AS tableVersion, t1.column_type AS columnType," + + " t1.column_comment AS columnComment, t1.column_nullable AS nullable," + + " t1.column_auto_increment AS autoIncrement," + + " t1.column_default_value AS defaultValue, t1.column_op_type AS columnOpType," + + " t1.deleted_at AS deletedAt, t1.audit_info AS auditInfo" + + " FROM " + + TableColumnMapper.COLUMN_TABLE_NAME + + " t1 JOIN (" + + " SELECT column_id, MAX(table_version) AS max_table_version" + + " FROM " + + TableColumnMapper.COLUMN_TABLE_NAME + + " WHERE table_id = #{tableId} AND table_version <= #{tableVersion} AND deleted_at = 0" + + " GROUP BY column_id) t2" + + " ON t1.column_id = t2.column_id AND t1.table_version = t2.max_table_version"; + } + + public String insertColumnPOs(@Param("columnPOs") List columnPOs) { + return ""; + } + + public String softDeleteColumnsByTableId(@Param("tableId") Long tableId) { + return "UPDATE " + + TableColumnMapper.COLUMN_TABLE_NAME + + " SET deleted_at = (UNIX_TIMESTAMP() * 1000.0)" + + " + EXTRACT(MICROSECOND FROM CURRENT_TIMESTAMP(3)) / 1000" + + " WHERE table_id = #{tableId} AND deleted_at = 0"; + } + + public String softDeleteColumnsByMetalakeId(@Param("metalakeId") Long metalakeId) { + return "UPDATE " + + TableColumnMapper.COLUMN_TABLE_NAME + + " SET deleted_at = (UNIX_TIMESTAMP() * 1000.0)" + + " + EXTRACT(MICROSECOND FROM CURRENT_TIMESTAMP(3)) / 1000" + + " WHERE metalake_id = #{metalakeId} AND deleted_at = 0"; + } + + public String softDeleteColumnsByCatalogId(@Param("catalogId") Long catalogId) { + return "UPDATE " + + TableColumnMapper.COLUMN_TABLE_NAME + + " SET deleted_at = (UNIX_TIMESTAMP() * 1000.0)" + + " + EXTRACT(MICROSECOND FROM CURRENT_TIMESTAMP(3)) / 1000" + + " WHERE catalog_id = #{catalogId} AND deleted_at = 0"; + } + + public String softDeleteColumnsBySchemaId(@Param("schemaId") Long schemaId) { + return "UPDATE " + + TableColumnMapper.COLUMN_TABLE_NAME + + " SET deleted_at = (UNIX_TIMESTAMP() * 1000.0)" + + " + EXTRACT(MICROSECOND FROM CURRENT_TIMESTAMP(3)) / 1000" + + " WHERE schema_id = #{schemaId} AND deleted_at = 0"; + } + + public String deleteColumnPOsByLegacyTimeline( + @Param("legacyTimeline") Long legacyTimeline, @Param("limit") int limit) { + return "DELETE FROM " + + TableColumnMapper.COLUMN_TABLE_NAME + + " WHERE deleted_at > 0 AND deleted_at < #{legacyTimeline} LIMIT #{limit}"; + } +} diff --git 
a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/TableColumnPostgreSQLProvider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/TableColumnPostgreSQLProvider.java new file mode 100644 index 00000000000..bf99438c8b2 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/TableColumnPostgreSQLProvider.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.storage.relational.mapper.provider.postgresql; + +import org.apache.gravitino.storage.relational.mapper.TableColumnMapper; +import org.apache.gravitino.storage.relational.mapper.provider.base.TableColumnBaseSQLProvider; +import org.apache.ibatis.annotations.Param; + +public class TableColumnPostgreSQLProvider extends TableColumnBaseSQLProvider { + + @Override + public String softDeleteColumnsByTableId(@Param("tableId") Long tableId) { + return "UPDATE " + + TableColumnMapper.COLUMN_TABLE_NAME + + " SET deleted_at = floor(extract(epoch from((current_timestamp -" + + " timestamp '1970-01-01 00:00:00')*1000)))" + + " WHERE table_id = #{tableId} AND deleted_at = 0"; + } + + @Override + public String softDeleteColumnsByMetalakeId(@Param("metalakeId") Long metalakeId) { + return "UPDATE " + + TableColumnMapper.COLUMN_TABLE_NAME + + " SET deleted_at = floor(extract(epoch from((current_timestamp -" + + " timestamp '1970-01-01 00:00:00')*1000)))" + + " WHERE metalake_id = #{metalakeId} AND deleted_at = 0"; + } + + @Override + public String softDeleteColumnsByCatalogId(@Param("catalogId") Long catalogId) { + return "UPDATE " + + TableColumnMapper.COLUMN_TABLE_NAME + + " SET deleted_at = floor(extract(epoch from((current_timestamp -" + + " timestamp '1970-01-01 00:00:00')*1000)))" + + " WHERE catalog_id = #{catalogId} AND deleted_at = 0"; + } + + @Override + public String softDeleteColumnsBySchemaId(@Param("schemaId") Long schemaId) { + return "UPDATE " + + TableColumnMapper.COLUMN_TABLE_NAME + + " SET deleted_at = floor(extract(epoch from((current_timestamp -" + + " timestamp '1970-01-01 00:00:00')*1000)))" + + " WHERE schema_id = #{schemaId} AND deleted_at = 0"; + } +} diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/po/ColumnPO.java b/core/src/main/java/org/apache/gravitino/storage/relational/po/ColumnPO.java new file mode 100644 index 00000000000..46c79f97320 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/storage/relational/po/ColumnPO.java @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.storage.relational.po; + +import com.google.common.base.Preconditions; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import org.apache.commons.lang3.StringUtils; + +@EqualsAndHashCode +@Getter +public class ColumnPO { + + public enum ColumnOpType { + CREATE((byte) 1), + UPDATE((byte) 2), + DELETE((byte) 3); + + private final byte value; + + ColumnOpType(byte value) { + this.value = value; + } + + public byte value() { + return value; + } + } + + public enum Nullable { + TRUE((byte) 0, true), + FALSE((byte) 1, false); + + private final byte value; + + private final boolean nullable; + + Nullable(byte value, boolean nullable) { + this.value = value; + this.nullable = nullable; + } + + public Byte value() { + return value; + } + + public boolean nullable() { + return nullable; + } + + public static Nullable fromValue(byte value) { + for (Nullable nullable : values()) { + if (nullable.value == value) { + return nullable; + } + } + throw new IllegalArgumentException("Invalid nullable value: " + value); + } + + public static Nullable fromBoolean(boolean nullable) { + for (Nullable nullableEnum : values()) { + if (nullableEnum.nullable == nullable) { + return nullableEnum; + } + } + throw new IllegalArgumentException("Invalid nullable boolean value: " + nullable); + } + } + + public enum AutoIncrement { + TRUE((byte) 0, true), + FALSE((byte) 1, false); + + private final byte value; + + private final boolean autoIncrement; + + AutoIncrement(Byte value, boolean autoIncrement) { + this.value = value; + this.autoIncrement = autoIncrement; + } + + public Byte value() { + return value; + } + + public boolean autoIncrement() { + return autoIncrement; + } + + public static AutoIncrement fromValue(byte value) { + for (AutoIncrement autoIncrement : values()) { + if (autoIncrement.value == value) { + return autoIncrement; + } + } + throw new IllegalArgumentException("Invalid auto increment value: " + value); + } + + public static AutoIncrement fromBoolean(boolean autoIncrement) { + for (AutoIncrement autoIncrementEnum : values()) { + if (autoIncrementEnum.autoIncrement == autoIncrement) { + return autoIncrementEnum; + } + } + throw new IllegalArgumentException("Invalid auto increment boolean value: " + autoIncrement); + } + } + + private Long columnId; + + private String columnName; + + private Long metalakeId; + + private Long catalogId; + + private Long schemaId; + + private Long tableId; + + private Long tableVersion; + + private String columnType; + + private String columnComment; + + private Byte nullable; + + private Byte autoIncrement; + + private String defaultValue; + + private Byte columnOpType; + + private Long deletedAt; + + private String auditInfo; + + public static Builder builder() { + return new Builder(); + } + + private ColumnPO() {} + + public static class Builder { + + private final ColumnPO columnPO; + + private Builder() { + columnPO = new ColumnPO(); + } + + 
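+ // Illustrative usage: chain the with* setters below and finish with build(), which + // enforces the required-field checks at the end of this class before returning the PO.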
public Builder withColumnId(Long columnId) { + columnPO.columnId = columnId; + return this; + } + + public Builder withColumnName(String columnName) { + columnPO.columnName = columnName; + return this; + } + + public Builder withMetalakeId(Long metalakeId) { + columnPO.metalakeId = metalakeId; + return this; + } + + public Builder withCatalogId(Long catalogId) { + columnPO.catalogId = catalogId; + return this; + } + + public Builder withSchemaId(Long schemaId) { + columnPO.schemaId = schemaId; + return this; + } + + public Builder withTableId(Long tableId) { + columnPO.tableId = tableId; + return this; + } + + public Builder withTableVersion(Long tableVersion) { + columnPO.tableVersion = tableVersion; + return this; + } + + public Builder withColumnType(String columnType) { + columnPO.columnType = columnType; + return this; + } + + public Builder withColumnComment(String columnComment) { + columnPO.columnComment = columnComment; + return this; + } + + public Builder withNullable(Byte nullable) { + columnPO.nullable = nullable; + return this; + } + + public Builder withAutoIncrement(Byte autoIncrement) { + columnPO.autoIncrement = autoIncrement; + return this; + } + + public Builder withDefaultValue(String defaultValue) { + columnPO.defaultValue = defaultValue; + return this; + } + + public Builder withColumnOpType(Byte columnOpType) { + columnPO.columnOpType = columnOpType; + return this; + } + + public Builder withDeletedAt(Long deletedAt) { + columnPO.deletedAt = deletedAt; + return this; + } + + public Builder withAuditInfo(String auditInfo) { + columnPO.auditInfo = auditInfo; + return this; + } + + public ColumnPO build() { + Preconditions.checkArgument(columnPO.columnId != null, "Column id is required"); + Preconditions.checkArgument( + StringUtils.isNotBlank(columnPO.columnName), + "Column name is required and cannot be blank"); + Preconditions.checkArgument(columnPO.metalakeId != null, "Metalake id is required"); + Preconditions.checkArgument(columnPO.catalogId != null, "Catalog id is required"); + Preconditions.checkArgument(columnPO.schemaId != null, "Schema id is required"); + Preconditions.checkArgument(columnPO.tableId != null, "Table id is required"); + Preconditions.checkArgument(columnPO.tableVersion != null, "Table version is required"); + Preconditions.checkArgument( + StringUtils.isNotBlank(columnPO.columnType), + "Column type is required and cannot be blank"); + Preconditions.checkArgument(columnPO.nullable != null, "Nullable is required"); + Preconditions.checkArgument(columnPO.autoIncrement != null, "Auto increment is required"); + Preconditions.checkArgument( + columnPO.columnOpType != null, "Column operation type is required"); + Preconditions.checkArgument( + StringUtils.isNotBlank(columnPO.auditInfo), "Audit info is required and cannot be blank"); + Preconditions.checkArgument(columnPO.deletedAt != null, "Deleted at is required"); + + return columnPO; + } + } +} diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/CatalogMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/CatalogMetaService.java index a6d462d4292..6e6e8aaa41f 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/CatalogMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/CatalogMetaService.java @@ -38,6 +38,7 @@ import org.apache.gravitino.storage.relational.mapper.FilesetVersionMapper; import org.apache.gravitino.storage.relational.mapper.OwnerMetaMapper; import 
org.apache.gravitino.storage.relational.mapper.SchemaMetaMapper; +import org.apache.gravitino.storage.relational.mapper.TableColumnMapper; import org.apache.gravitino.storage.relational.mapper.TableMetaMapper; import org.apache.gravitino.storage.relational.mapper.TopicMetaMapper; import org.apache.gravitino.storage.relational.po.CatalogPO; @@ -211,6 +212,10 @@ public boolean deleteCatalog(NameIdentifier identifier, boolean cascade) { SessionUtils.doWithoutCommit( TableMetaMapper.class, mapper -> mapper.softDeleteTableMetasByCatalogId(catalogId)), + () -> + SessionUtils.doWithoutCommit( + TableColumnMapper.class, + mapper -> mapper.softDeleteColumnsByCatalogId(catalogId)), () -> SessionUtils.doWithoutCommit( FilesetMetaMapper.class, diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/MetalakeMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/MetalakeMetaService.java index d7bdb9d1ff8..dab3ba1dc54 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/MetalakeMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/MetalakeMetaService.java @@ -41,6 +41,7 @@ import org.apache.gravitino.storage.relational.mapper.RoleMetaMapper; import org.apache.gravitino.storage.relational.mapper.SchemaMetaMapper; import org.apache.gravitino.storage.relational.mapper.SecurableObjectMapper; +import org.apache.gravitino.storage.relational.mapper.TableColumnMapper; import org.apache.gravitino.storage.relational.mapper.TableMetaMapper; import org.apache.gravitino.storage.relational.mapper.TagMetaMapper; import org.apache.gravitino.storage.relational.mapper.TagMetadataObjectRelMapper; @@ -191,6 +192,10 @@ public boolean deleteMetalake(NameIdentifier ident, boolean cascade) { SessionUtils.doWithoutCommit( TableMetaMapper.class, mapper -> mapper.softDeleteTableMetasByMetalakeId(metalakeId)), + () -> + SessionUtils.doWithoutCommit( + TableColumnMapper.class, + mapper -> mapper.softDeleteColumnsByMetalakeId(metalakeId)), () -> SessionUtils.doWithoutCommit( FilesetMetaMapper.class, diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/SchemaMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/SchemaMetaService.java index 3e116781162..d2b125f1c36 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/SchemaMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/SchemaMetaService.java @@ -37,6 +37,7 @@ import org.apache.gravitino.storage.relational.mapper.FilesetVersionMapper; import org.apache.gravitino.storage.relational.mapper.OwnerMetaMapper; import org.apache.gravitino.storage.relational.mapper.SchemaMetaMapper; +import org.apache.gravitino.storage.relational.mapper.TableColumnMapper; import org.apache.gravitino.storage.relational.mapper.TableMetaMapper; import org.apache.gravitino.storage.relational.mapper.TopicMetaMapper; import org.apache.gravitino.storage.relational.po.SchemaPO; @@ -197,6 +198,10 @@ public boolean deleteSchema(NameIdentifier identifier, boolean cascade) { SessionUtils.doWithoutCommit( TableMetaMapper.class, mapper -> mapper.softDeleteTableMetasBySchemaId(schemaId)), + () -> + SessionUtils.doWithoutCommit( + TableColumnMapper.class, + mapper -> mapper.softDeleteColumnsBySchemaId(schemaId)), () -> SessionUtils.doWithoutCommit( FilesetMetaMapper.class, diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/TableColumnMetaService.java 
b/core/src/main/java/org/apache/gravitino/storage/relational/service/TableColumnMetaService.java new file mode 100644 index 00000000000..7ec975d45f8 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/TableColumnMetaService.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.storage.relational.service; + +import com.google.common.collect.Lists; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.gravitino.meta.ColumnEntity; +import org.apache.gravitino.meta.TableEntity; +import org.apache.gravitino.storage.relational.mapper.TableColumnMapper; +import org.apache.gravitino.storage.relational.po.ColumnPO; +import org.apache.gravitino.storage.relational.po.TablePO; +import org.apache.gravitino.storage.relational.utils.POConverters; +import org.apache.gravitino.storage.relational.utils.SessionUtils; + +public class TableColumnMetaService { + + private static final TableColumnMetaService INSTANCE = new TableColumnMetaService(); + + private TableColumnMetaService() {} + + public static TableColumnMetaService getInstance() { + return INSTANCE; + } + + List<ColumnPO> getColumnsByTableIdAndVersion(Long tableId, Long version) { + List<ColumnPO> columnPOs = + SessionUtils.getWithoutCommit( + TableColumnMapper.class, + mapper -> mapper.listColumnPOsByTableIdAndVersion(tableId, version)); + + // Filter out the deleted columns + return columnPOs.stream() + .filter(c -> c.getColumnOpType() != ColumnPO.ColumnOpType.DELETE.value()) + .collect(Collectors.toList()); + } + + void insertColumnPOs(TablePO tablePO, List<ColumnEntity> columnEntities) { + List<ColumnPO> columnPOs = + POConverters.initializeColumnPOs(tablePO, columnEntities, ColumnPO.ColumnOpType.CREATE); + + // insertColumnPOs runs inside the insertTable transaction, so we don't commit here. + SessionUtils.doWithoutCommit( + TableColumnMapper.class, mapper -> mapper.insertColumnPOs(columnPOs)); + } + + boolean deleteColumnsByTableId(Long tableId) { + // deleteColumns runs inside the enclosing deleteTable (or insertTable overwrite) + // transaction, so we don't commit here. + Integer result = + SessionUtils.doWithoutCommitAndFetchResult( + TableColumnMapper.class, mapper -> mapper.softDeleteColumnsByTableId(tableId)); + return result > 0; + } + + public int deleteColumnsByLegacyTimeline(Long legacyTimeline, int limit) { + // deleteColumns runs inside the caller's transaction, so we don't commit here. 
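+ // Rows are physically removed only once they have been soft-deleted (deleted_at > 0) + // and their deletion time predates the legacy timeline, at most #{limit} rows per call.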
+ return SessionUtils.doWithoutCommitAndFetchResult( + TableColumnMapper.class, + mapper -> mapper.deleteColumnPOsByLegacyTimeline(legacyTimeline, limit)); + } + + boolean isColumnUpdated(TableEntity oldTable, TableEntity newTable) { + Map<Long, ColumnEntity> oldColumns = + oldTable.columns() == null + ? Collections.emptyMap() + : oldTable.columns().stream() + .collect(Collectors.toMap(ColumnEntity::id, Function.identity())); + + Map<Long, ColumnEntity> newColumns = + newTable.columns() == null + ? Collections.emptyMap() + : newTable.columns().stream() + .collect(Collectors.toMap(ColumnEntity::id, Function.identity())); + + return oldColumns.size() != newColumns.size() || !oldColumns.equals(newColumns); + } + + void updateColumnPOsFromTableDiff( + TableEntity oldTable, TableEntity newTable, TablePO newTablePO) { + Map<Long, ColumnEntity> oldColumns = + oldTable.columns() == null + ? Collections.emptyMap() + : oldTable.columns().stream() + .collect(Collectors.toMap(ColumnEntity::id, Function.identity())); + Map<Long, ColumnEntity> newColumns = + newTable.columns() == null + ? Collections.emptyMap() + : newTable.columns().stream() + .collect(Collectors.toMap(ColumnEntity::id, Function.identity())); + + List<ColumnPO> columnPOsToInsert = Lists.newArrayList(); + for (ColumnEntity newColumn : newColumns.values()) { + ColumnEntity oldColumn = oldColumns.get(newColumn.id()); + if (oldColumn == null || !oldColumn.equals(newColumn)) { + columnPOsToInsert.add( + POConverters.initializeColumnPO(newTablePO, newColumn, ColumnPO.ColumnOpType.UPDATE)); + } + } + + for (ColumnEntity oldColumn : oldColumns.values()) { + if (!newColumns.containsKey(oldColumn.id())) { + columnPOsToInsert.add( + POConverters.initializeColumnPO(newTablePO, oldColumn, ColumnPO.ColumnOpType.DELETE)); + } + } + + // If there is no change, directly return + if (columnPOsToInsert.isEmpty()) { + return; + } + + // updateColumns will be done in updateTable transaction, so we don't do commit here. 
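+ // Column rows are never updated in place: each change is appended as a new row tagged + // UPDATE or DELETE under the new table version, which is what lets + // listColumnPOsByTableIdAndVersion reconstruct the column set of any historical version.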
+ SessionUtils.doWithoutCommit( + TableColumnMapper.class, mapper -> mapper.insertColumnPOs(columnPOsToInsert)); + } +} diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/TableMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/TableMetaService.java index 87a2df50ee8..ed7afe748de 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/TableMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/TableMetaService.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.util.List; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import org.apache.gravitino.Entity; import org.apache.gravitino.HasIdentifier; @@ -32,6 +34,7 @@ import org.apache.gravitino.meta.TableEntity; import org.apache.gravitino.storage.relational.mapper.OwnerMetaMapper; import org.apache.gravitino.storage.relational.mapper.TableMetaMapper; +import org.apache.gravitino.storage.relational.po.ColumnPO; import org.apache.gravitino.storage.relational.po.TablePO; import org.apache.gravitino.storage.relational.utils.ExceptionUtils; import org.apache.gravitino.storage.relational.utils.POConverters; @@ -49,21 +52,6 @@ public static TableMetaService getInstance() { private TableMetaService() {} - public TablePO getTablePOBySchemaIdAndName(Long schemaId, String tableName) { - TablePO tablePO = - SessionUtils.getWithoutCommit( - TableMetaMapper.class, - mapper -> mapper.selectTableMetaBySchemaIdAndName(schemaId, tableName)); - - if (tablePO == null) { - throw new NoSuchEntityException( - NoSuchEntityException.NO_SUCH_ENTITY_MESSAGE, - Entity.EntityType.TABLE.name().toLowerCase(), - tableName); - } - return tablePO; - } - // Table may be deleted, so the TablePO may be null. public TablePO getTablePOById(Long tableId) { TablePO tablePO = @@ -94,8 +82,11 @@ public TableEntity getTableByIdentifier(NameIdentifier identifier) { CommonMetaService.getInstance().getParentEntityIdByNamespace(identifier.namespace()); TablePO tablePO = getTablePOBySchemaIdAndName(schemaId, identifier.name()); + List columnPOs = + TableColumnMetaService.getInstance() + .getColumnsByTableIdAndVersion(tablePO.getTableId(), tablePO.getCurrentVersion()); - return POConverters.fromTablePO(tablePO, identifier.namespace()); + return POConverters.fromTableAndColumnPOs(tablePO, columnPOs, identifier.namespace()); } public List listTablesByNamespace(Namespace namespace) { @@ -117,16 +108,34 @@ public void insertTable(TableEntity tableEntity, boolean overwrite) throws IOExc TablePO.Builder builder = TablePO.builder(); fillTablePOBuilderParentEntityId(builder, tableEntity.namespace()); - SessionUtils.doWithCommit( - TableMetaMapper.class, - mapper -> { - TablePO po = POConverters.initializeTablePOWithVersion(tableEntity, builder); + AtomicReference tablePORef = new AtomicReference<>(); + SessionUtils.doMultipleWithCommit( + () -> + SessionUtils.doWithoutCommit( + TableMetaMapper.class, + mapper -> { + TablePO po = POConverters.initializeTablePOWithVersion(tableEntity, builder); + tablePORef.set(po); + if (overwrite) { + mapper.insertTableMetaOnDuplicateKeyUpdate(po); + } else { + mapper.insertTableMeta(po); + } + }), + () -> { + // We need to delete the columns first if we want to overwrite the table. 
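+ // An overwrite reuses the existing table id (see insertTableMetaOnDuplicateKeyUpdate + // above), so columns belonging to the previous incarnation of the table must be + // soft-deleted before the new column set is inserted.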
if (overwrite) { - mapper.insertTableMetaOnDuplicateKeyUpdate(po); - } else { - mapper.insertTableMeta(po); + TableColumnMetaService.getInstance() + .deleteColumnsByTableId(tablePORef.get().getTableId()); + } + }, + () -> { + if (tableEntity.columns() != null && !tableEntity.columns().isEmpty()) { + TableColumnMetaService.getInstance() + .insertColumnPOs(tablePORef.get(), tableEntity.columns()); } }); + } catch (RuntimeException re) { ExceptionUtils.checkSQLException( re, Entity.EntityType.TABLE, tableEntity.nameIdentifier().toString()); @@ -144,30 +153,47 @@ public TableEntity updateTable( CommonMetaService.getInstance().getParentEntityIdByNamespace(identifier.namespace()); TablePO oldTablePO = getTablePOBySchemaIdAndName(schemaId, tableName); - TableEntity oldTableEntity = POConverters.fromTablePO(oldTablePO, identifier.namespace()); - TableEntity newEntity = (TableEntity) updater.apply((E) oldTableEntity); + List oldTableColumns = + TableColumnMetaService.getInstance() + .getColumnsByTableIdAndVersion(oldTablePO.getTableId(), oldTablePO.getCurrentVersion()); + TableEntity oldTableEntity = + POConverters.fromTableAndColumnPOs(oldTablePO, oldTableColumns, identifier.namespace()); + + TableEntity newTableEntity = (TableEntity) updater.apply((E) oldTableEntity); Preconditions.checkArgument( - Objects.equals(oldTableEntity.id(), newEntity.id()), + Objects.equals(oldTableEntity.id(), newTableEntity.id()), "The updated table entity id: %s should be same with the table entity id before: %s", - newEntity.id(), + newTableEntity.id(), oldTableEntity.id()); - Integer updateResult; + boolean isColumnChanged = + TableColumnMetaService.getInstance().isColumnUpdated(oldTableEntity, newTableEntity); + TablePO newTablePO = + POConverters.updateTablePOWithVersion(oldTablePO, newTableEntity, isColumnChanged); + + final AtomicInteger updateResult = new AtomicInteger(0); try { - updateResult = - SessionUtils.doWithCommitAndFetchResult( - TableMetaMapper.class, - mapper -> - mapper.updateTableMeta( - POConverters.updateTablePOWithVersion(oldTablePO, newEntity), oldTablePO)); + SessionUtils.doMultipleWithCommit( + () -> + updateResult.set( + SessionUtils.doWithoutCommitAndFetchResult( + TableMetaMapper.class, + mapper -> mapper.updateTableMeta(newTablePO, oldTablePO))), + () -> { + if (updateResult.get() > 0 && isColumnChanged) { + TableColumnMetaService.getInstance() + .updateColumnPOsFromTableDiff(oldTableEntity, newTableEntity, newTablePO); + } + }); + } catch (RuntimeException re) { ExceptionUtils.checkSQLException( - re, Entity.EntityType.TABLE, newEntity.nameIdentifier().toString()); + re, Entity.EntityType.TABLE, newTableEntity.nameIdentifier().toString()); throw re; } - if (updateResult > 0) { - return newEntity; + if (updateResult.get() > 0) { + return newTableEntity; } else { throw new IOException("Failed to update the entity: " + identifier); } @@ -183,26 +209,35 @@ public boolean deleteTable(NameIdentifier identifier) { Long tableId = getTableIdBySchemaIdAndName(schemaId, tableName); + AtomicInteger deleteResult = new AtomicInteger(0); SessionUtils.doMultipleWithCommit( () -> - SessionUtils.doWithoutCommit( - TableMetaMapper.class, mapper -> mapper.softDeleteTableMetasByTableId(tableId)), - () -> + deleteResult.set( + SessionUtils.doWithCommitAndFetchResult( + TableMetaMapper.class, + mapper -> mapper.softDeleteTableMetasByTableId(tableId))), + () -> { + if (deleteResult.get() > 0) { SessionUtils.doWithoutCommit( OwnerMetaMapper.class, mapper -> mapper.softDeleteOwnerRelByMetadataObjectIdAndType( - 
tableId, MetadataObject.Type.TABLE.name()))); + tableId, MetadataObject.Type.TABLE.name())); + } + }, + () -> { + if (deleteResult.get() > 0) { + TableColumnMetaService.getInstance().deleteColumnsByTableId(tableId); + } + }); - return true; + return deleteResult.get() > 0; } public int deleteTableMetasByLegacyTimeline(Long legacyTimeline, int limit) { return SessionUtils.doWithCommitAndFetchResult( TableMetaMapper.class, - mapper -> { - return mapper.deleteTableMetasByLegacyTimeline(legacyTimeline, limit); - }); + mapper -> mapper.deleteTableMetasByLegacyTimeline(legacyTimeline, limit)); } private void fillTablePOBuilderParentEntityId(TablePO.Builder builder, Namespace namespace) { @@ -229,4 +264,19 @@ private void fillTablePOBuilderParentEntityId(TablePO.Builder builder, Namespace } } } + + private TablePO getTablePOBySchemaIdAndName(Long schemaId, String tableName) { + TablePO tablePO = + SessionUtils.getWithoutCommit( + TableMetaMapper.class, + mapper -> mapper.selectTableMetaBySchemaIdAndName(schemaId, tableName)); + + if (tablePO == null) { + throw new NoSuchEntityException( + NoSuchEntityException.NO_SUCH_ENTITY_MESSAGE, + Entity.EntityType.TABLE.name().toLowerCase(), + tableName); + } + return tablePO; + } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/session/SqlSessionFactoryHelper.java b/core/src/main/java/org/apache/gravitino/storage/relational/session/SqlSessionFactoryHelper.java index 9d928271623..4fe53dba381 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/session/SqlSessionFactoryHelper.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/session/SqlSessionFactoryHelper.java @@ -37,6 +37,7 @@ import org.apache.gravitino.storage.relational.mapper.RoleMetaMapper; import org.apache.gravitino.storage.relational.mapper.SchemaMetaMapper; import org.apache.gravitino.storage.relational.mapper.SecurableObjectMapper; +import org.apache.gravitino.storage.relational.mapper.TableColumnMapper; import org.apache.gravitino.storage.relational.mapper.TableMetaMapper; import org.apache.gravitino.storage.relational.mapper.TagMetaMapper; import org.apache.gravitino.storage.relational.mapper.TagMetadataObjectRelMapper; @@ -110,6 +111,7 @@ public void init(Config config) { configuration.addMapper(CatalogMetaMapper.class); configuration.addMapper(SchemaMetaMapper.class); configuration.addMapper(TableMetaMapper.class); + configuration.addMapper(TableColumnMapper.class); configuration.addMapper(FilesetMetaMapper.class); configuration.addMapper(FilesetVersionMapper.class); configuration.addMapper(TopicMetaMapper.class); diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/utils/POConverters.java b/core/src/main/java/org/apache/gravitino/storage/relational/utils/POConverters.java index f09f6751cf5..dabce09cb2f 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/utils/POConverters.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/utils/POConverters.java @@ -22,6 +22,7 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.google.common.collect.Lists; import java.time.Instant; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -34,11 +35,14 @@ import org.apache.gravitino.authorization.Privileges; import org.apache.gravitino.authorization.SecurableObject; import org.apache.gravitino.authorization.SecurableObjects; +import org.apache.gravitino.dto.rel.expressions.FunctionArg; +import 
org.apache.gravitino.dto.util.DTOConverters; import org.apache.gravitino.file.Fileset; import org.apache.gravitino.json.JsonUtils; import org.apache.gravitino.meta.AuditInfo; import org.apache.gravitino.meta.BaseMetalake; import org.apache.gravitino.meta.CatalogEntity; +import org.apache.gravitino.meta.ColumnEntity; import org.apache.gravitino.meta.FilesetEntity; import org.apache.gravitino.meta.GroupEntity; import org.apache.gravitino.meta.RoleEntity; @@ -48,7 +52,11 @@ import org.apache.gravitino.meta.TagEntity; import org.apache.gravitino.meta.TopicEntity; import org.apache.gravitino.meta.UserEntity; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.expressions.Expression; +import org.apache.gravitino.rel.types.Type; import org.apache.gravitino.storage.relational.po.CatalogPO; +import org.apache.gravitino.storage.relational.po.ColumnPO; import org.apache.gravitino.storage.relational.po.ExtendedGroupPO; import org.apache.gravitino.storage.relational.po.ExtendedUserPO; import org.apache.gravitino.storage.relational.po.FilesetPO; @@ -380,12 +388,21 @@ public static TablePO initializeTablePOWithVersion( * * @param oldTablePO the old TablePO object * @param newTable the new TableEntity object + * @param needUpdateVersion whether need to update the version * @return TablePO object with updated version */ - public static TablePO updateTablePOWithVersion(TablePO oldTablePO, TableEntity newTable) { - Long lastVersion = oldTablePO.getLastVersion(); - // Will set the version to the last version + 1 when having some fields need be multiple version - Long nextVersion = lastVersion; + public static TablePO updateTablePOWithVersion( + TablePO oldTablePO, TableEntity newTable, boolean needUpdateVersion) { + Long lastVersion; + Long currentVersion; + if (needUpdateVersion) { + lastVersion = oldTablePO.getLastVersion() + 1; + currentVersion = lastVersion; + } else { + lastVersion = oldTablePO.getLastVersion(); + currentVersion = oldTablePO.getCurrentVersion(); + } + try { return TablePO.builder() .withTableId(oldTablePO.getTableId()) @@ -394,8 +411,8 @@ public static TablePO updateTablePOWithVersion(TablePO oldTablePO, TableEntity n .withCatalogId(oldTablePO.getCatalogId()) .withSchemaId(oldTablePO.getSchemaId()) .withAuditInfo(JsonUtils.anyFieldMapper().writeValueAsString(newTable.auditInfo())) - .withCurrentVersion(nextVersion) - .withLastVersion(nextVersion) + .withCurrentVersion(currentVersion) + .withLastVersion(lastVersion) .withDeletedAt(DEFAULT_DELETED_AT) .build(); } catch (JsonProcessingException e) { @@ -411,11 +428,17 @@ public static TablePO updateTablePOWithVersion(TablePO oldTablePO, TableEntity n * @return TableEntity object from TablePO object */ public static TableEntity fromTablePO(TablePO tablePO, Namespace namespace) { + return fromTableAndColumnPOs(tablePO, Collections.emptyList(), namespace); + } + + public static TableEntity fromTableAndColumnPOs( + TablePO tablePO, List columnPOs, Namespace namespace) { try { return TableEntity.builder() .withId(tablePO.getTableId()) .withName(tablePO.getTableName()) .withNamespace(namespace) + .withColumns(fromColumnPOs(columnPOs)) .withAuditInfo( JsonUtils.anyFieldMapper().readValue(tablePO.getAuditInfo(), AuditInfo.class)) .build(); @@ -424,6 +447,73 @@ public static TableEntity fromTablePO(TablePO tablePO, Namespace namespace) { } } + public static ColumnEntity fromColumnPO(ColumnPO columnPO) { + try { + return ColumnEntity.builder() + .withId(columnPO.getColumnId()) + .withName(columnPO.getColumnName()) + 
.withDataType(JsonUtils.anyFieldMapper().readValue(columnPO.getColumnType(), Type.class)) + .withComment(columnPO.getColumnComment()) + .withAutoIncrement( + ColumnPO.AutoIncrement.fromValue(columnPO.getAutoIncrement()).autoIncrement()) + .withNullable(ColumnPO.Nullable.fromValue(columnPO.getNullable()).nullable()) + .withDefaultValue( + columnPO.getDefaultValue() == null + ? Column.DEFAULT_VALUE_NOT_SET + : DTOConverters.fromFunctionArg( + (FunctionArg) + JsonUtils.anyFieldMapper() + .readValue(columnPO.getDefaultValue(), Expression.class))) + .withAuditInfo( + JsonUtils.anyFieldMapper().readValue(columnPO.getAuditInfo(), AuditInfo.class)) + .build(); + } catch (JsonProcessingException e) { + throw new RuntimeException("Failed to deserialize json object:", e); + } + } + + public static List fromColumnPOs(List columnPOs) { + return columnPOs.stream().map(POConverters::fromColumnPO).collect(Collectors.toList()); + } + + public static ColumnPO initializeColumnPO( + TablePO tablePO, ColumnEntity columnEntity, ColumnPO.ColumnOpType opType) { + try { + return ColumnPO.builder() + .withColumnId(columnEntity.id()) + .withColumnName(columnEntity.name()) + .withMetalakeId(tablePO.getMetalakeId()) + .withCatalogId(tablePO.getCatalogId()) + .withSchemaId(tablePO.getSchemaId()) + .withTableId(tablePO.getTableId()) + .withTableVersion(tablePO.getCurrentVersion()) + .withColumnType(JsonUtils.anyFieldMapper().writeValueAsString(columnEntity.dataType())) + .withColumnComment(columnEntity.comment()) + .withNullable(ColumnPO.Nullable.fromBoolean(columnEntity.nullable()).value()) + .withAutoIncrement( + ColumnPO.AutoIncrement.fromBoolean(columnEntity.autoIncrement()).value()) + .withDefaultValue( + columnEntity.defaultValue() == null + || columnEntity.defaultValue().equals(Column.DEFAULT_VALUE_NOT_SET) + ? 
null + : JsonUtils.anyFieldMapper() + .writeValueAsString(DTOConverters.toFunctionArg(columnEntity.defaultValue()))) + .withColumnOpType(opType.value()) + .withAuditInfo(JsonUtils.anyFieldMapper().writeValueAsString(columnEntity.auditInfo())) + .withDeletedAt(DEFAULT_DELETED_AT) + .build(); + } catch (JsonProcessingException e) { + throw new RuntimeException("Failed to serialize json object:", e); + } + } + + public static List initializeColumnPOs( + TablePO tablePO, List columnEntities, ColumnPO.ColumnOpType opType) { + return columnEntities.stream() + .map(columnEntity -> initializeColumnPO(tablePO, columnEntity, opType)) + .collect(Collectors.toList()); + } + /** * Convert list of {@link TablePO} to list of {@link TableEntity} * diff --git a/core/src/test/java/org/apache/gravitino/TestColumn.java b/core/src/test/java/org/apache/gravitino/TestColumn.java index 93af75c0510..7085da6d3fb 100644 --- a/core/src/test/java/org/apache/gravitino/TestColumn.java +++ b/core/src/test/java/org/apache/gravitino/TestColumn.java @@ -40,6 +40,7 @@ protected TestColumn internalBuild() { column.comment = comment; column.dataType = dataType; column.nullable = nullable; + column.autoIncrement = autoIncrement; column.defaultValue = defaultValue; return column; diff --git a/core/src/test/java/org/apache/gravitino/catalog/TestTableNormalizeDispatcher.java b/core/src/test/java/org/apache/gravitino/catalog/TestTableNormalizeDispatcher.java index 2c8938edc65..c45f5cab27d 100644 --- a/core/src/test/java/org/apache/gravitino/catalog/TestTableNormalizeDispatcher.java +++ b/core/src/test/java/org/apache/gravitino/catalog/TestTableNormalizeDispatcher.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.util.Arrays; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import org.apache.gravitino.MetadataObjects; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; @@ -178,8 +180,11 @@ public void testNameSpec() { private void assertTableCaseInsensitive( NameIdentifier tableIdent, Column[] expectedColumns, Table table) { Assertions.assertEquals(tableIdent.name().toLowerCase(), table.name()); - Assertions.assertEquals(expectedColumns[0].name().toLowerCase(), table.columns()[0].name()); - Assertions.assertEquals(expectedColumns[1].name().toLowerCase(), table.columns()[1].name()); + Set expectedColumnNames = + Arrays.stream(expectedColumns).map(c -> c.name().toLowerCase()).collect(Collectors.toSet()); + Set actualColumnNames = + Arrays.stream(table.columns()).map(Column::name).collect(Collectors.toSet()); + Assertions.assertEquals(expectedColumnNames, actualColumnNames); Assertions.assertEquals( expectedColumns[0].name().toLowerCase(), table.partitioning()[0].references()[0].fieldName()[0]); diff --git a/core/src/test/java/org/apache/gravitino/catalog/TestTableOperationDispatcher.java b/core/src/test/java/org/apache/gravitino/catalog/TestTableOperationDispatcher.java index 1dd31fd33fb..f31b95e1e78 100644 --- a/core/src/test/java/org/apache/gravitino/catalog/TestTableOperationDispatcher.java +++ b/core/src/test/java/org/apache/gravitino/catalog/TestTableOperationDispatcher.java @@ -38,8 +38,11 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.function.Function; +import java.util.stream.Collectors; import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.gravitino.Config; import org.apache.gravitino.GravitinoEnv; @@ -52,10 
+55,12 @@ import org.apache.gravitino.exceptions.NoSuchEntityException; import org.apache.gravitino.lock.LockManager; import org.apache.gravitino.meta.AuditInfo; +import org.apache.gravitino.meta.ColumnEntity; import org.apache.gravitino.meta.TableEntity; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; import org.apache.gravitino.rel.TableChange; +import org.apache.gravitino.rel.expressions.literals.Literals; import org.apache.gravitino.rel.expressions.transforms.Transform; import org.apache.gravitino.rel.types.Types; import org.junit.jupiter.api.Assertions; @@ -345,4 +350,463 @@ public void testCreateTableNeedImportingSchema() throws IOException { Assertions.assertTrue(entityStore.exists(NameIdentifier.of(tableNs.levels()), SCHEMA)); Assertions.assertTrue(entityStore.exists(tableIdent, TABLE)); } + + @Test + public void testCreateAndLoadTableWithColumn() throws IOException { + Namespace tableNs = Namespace.of(metalake, catalog, "schema91"); + Map props = ImmutableMap.of("k1", "v1", "k2", "v2"); + schemaOperationDispatcher.createSchema(NameIdentifier.of(tableNs.levels()), "comment", props); + + NameIdentifier tableIdent = NameIdentifier.of(tableNs, "table41"); + Column[] columns = + new Column[] { + TestColumn.builder() + .withName("col1") + .withType(Types.StringType.get()) + .withComment("comment1") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("1")) + .build(), + TestColumn.builder() + .withName("col2") + .withType(Types.StringType.get()) + .withComment("comment2") + .withNullable(false) + .withAutoIncrement(false) + .withDefaultValue(Literals.stringLiteral("2")) + .build() + }; + + Table table1 = + tableOperationDispatcher.createTable( + tableIdent, columns, "comment", props, new Transform[0]); + + Table loadedTable1 = tableOperationDispatcher.loadTable(tableIdent); + Assertions.assertEquals(table1.name(), loadedTable1.name()); + Assertions.assertEquals(table1.comment(), loadedTable1.comment()); + testProperties(table1.properties(), loadedTable1.properties()); + testColumns(columns, loadedTable1.columns()); + + // The columns from table and table entity should be the same after creating. + TableEntity tableEntity = entityStore.get(tableIdent, TABLE, TableEntity.class); + Assertions.assertNotNull(tableEntity); + Assertions.assertEquals("table41", tableEntity.name()); + testColumnAndColumnEntities(columns, tableEntity.columns()); + + // Test if the column from table is not matched with the column from table entity + TestCatalog testCatalog = + (TestCatalog) catalogManager.loadCatalog(NameIdentifier.of(metalake, catalog)); + TestCatalogOperations testCatalogOperations = (TestCatalogOperations) testCatalog.ops(); + + // 1. Update the existing column + Table alteredTable2 = + testCatalogOperations.alterTable( + tableIdent, TableChange.renameColumn(new String[] {"col1"}, "col3")); + Table loadedTable2 = tableOperationDispatcher.loadTable(tableIdent); + testColumns(alteredTable2.columns(), loadedTable2.columns()); + + // columns in table entity should be updated to match the columns in table + TableEntity tableEntity2 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(alteredTable2.columns(), tableEntity2.columns()); + + // 2. 
Add a new column + Table alteredTable3 = + testCatalogOperations.alterTable( + tableIdent, + TableChange.addColumn( + new String[] {"col4"}, + Types.StringType.get(), + "comment4", + TableChange.ColumnPosition.first(), + true, + true, + Literals.stringLiteral("4"))); + + Table loadedTable3 = tableOperationDispatcher.loadTable(tableIdent); + testColumns(alteredTable3.columns(), loadedTable3.columns()); + + TableEntity tableEntity3 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(alteredTable3.columns(), tableEntity3.columns()); + + // 3. Drop a column + Table alteredTable4 = + testCatalogOperations.alterTable( + tableIdent, TableChange.deleteColumn(new String[] {"col2"}, true)); + Table loadedTable4 = tableOperationDispatcher.loadTable(tableIdent); + testColumns(alteredTable4.columns(), loadedTable4.columns()); + + TableEntity tableEntity4 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(alteredTable4.columns(), tableEntity4.columns()); + + // No column for the table + Table alteredTable5 = + testCatalogOperations.alterTable( + tableIdent, + TableChange.deleteColumn(new String[] {"col3"}, true), + TableChange.deleteColumn(new String[] {"col4"}, true)); + Table loadedTable5 = tableOperationDispatcher.loadTable(tableIdent); + Assertions.assertEquals(0, alteredTable5.columns().length); + Assertions.assertEquals(0, loadedTable5.columns().length); + + TableEntity tableEntity5 = entityStore.get(tableIdent, TABLE, TableEntity.class); + Assertions.assertEquals(0, tableEntity5.columns().size()); + + // Re-add columns to the table + Table alteredTable6 = + testCatalogOperations.alterTable( + tableIdent, + TableChange.addColumn( + new String[] {"col5"}, + Types.StringType.get(), + "comment5", + TableChange.ColumnPosition.first(), + true, + true, + Literals.stringLiteral("5")), + TableChange.addColumn( + new String[] {"col6"}, + Types.StringType.get(), + "comment6", + TableChange.ColumnPosition.first(), + false, + false, + Literals.stringLiteral("2"))); + Table loadedTable6 = tableOperationDispatcher.loadTable(tableIdent); + testColumns(alteredTable6.columns(), loadedTable6.columns()); + + TableEntity tableEntity6 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(alteredTable6.columns(), tableEntity6.columns()); + } + + @Test + public void testCreateAndAlterTableWithColumn() throws IOException { + Namespace tableNs = Namespace.of(metalake, catalog, "schema101"); + Map props = ImmutableMap.of("k1", "v1", "k2", "v2"); + schemaOperationDispatcher.createSchema(NameIdentifier.of(tableNs.levels()), "comment", props); + + NameIdentifier tableIdent = NameIdentifier.of(tableNs, "table51"); + Column[] columns = + new Column[] { + TestColumn.builder() + .withName("col1") + .withType(Types.StringType.get()) + .withComment("comment1") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("1")) + .build(), + TestColumn.builder() + .withName("col2") + .withType(Types.StringType.get()) + .withComment("comment2") + .withNullable(false) + .withAutoIncrement(false) + .withDefaultValue(Literals.stringLiteral("2")) + .build() + }; + + Table table1 = + tableOperationDispatcher.createTable( + tableIdent, columns, "comment", props, new Transform[0]); + testColumns(columns, table1.columns()); + + // 1. 
Rename the column + Table alteredTable1 = + tableOperationDispatcher.alterTable( + tableIdent, TableChange.renameColumn(new String[] {"col1"}, "col3")); + Column[] expectedColumns = + new Column[] { + TestColumn.builder() + .withName("col3") + .withType(Types.StringType.get()) + .withComment("comment1") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("1")) + .build(), + TestColumn.builder() + .withName("col2") + .withType(Types.StringType.get()) + .withComment("comment2") + .withNullable(false) + .withAutoIncrement(false) + .withDefaultValue(Literals.stringLiteral("2")) + .build() + }; + testColumns(expectedColumns, alteredTable1.columns()); + + TableEntity tableEntity1 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(expectedColumns, tableEntity1.columns()); + + // 2. Add a new column + Table alteredTable2 = + tableOperationDispatcher.alterTable( + tableIdent, + TableChange.addColumn( + new String[] {"col4"}, + Types.StringType.get(), + "comment4", + TableChange.ColumnPosition.first(), + true, + true, + Literals.stringLiteral("4"))); + Column[] expectedColumns2 = + new Column[] { + TestColumn.builder() + .withName("col4") + .withType(Types.StringType.get()) + .withComment("comment4") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("4")) + .build(), + TestColumn.builder() + .withName("col3") + .withType(Types.StringType.get()) + .withComment("comment1") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("1")) + .build(), + TestColumn.builder() + .withName("col2") + .withType(Types.StringType.get()) + .withComment("comment2") + .withNullable(false) + .withAutoIncrement(false) + .withDefaultValue(Literals.stringLiteral("2")) + .build() + }; + + testColumns(expectedColumns2, alteredTable2.columns()); + + TableEntity tableEntity2 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(expectedColumns2, tableEntity2.columns()); + + // 3. Drop a column + Table alteredTable3 = + tableOperationDispatcher.alterTable( + tableIdent, + TableChange.deleteColumn(new String[] {"col2"}, true), + TableChange.deleteColumn(new String[] {"col3"}, true)); + Column[] expectedColumns3 = + new Column[] { + TestColumn.builder() + .withName("col4") + .withType(Types.StringType.get()) + .withComment("comment4") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("4")) + .build() + }; + testColumns(expectedColumns3, alteredTable3.columns()); + + TableEntity tableEntity3 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(expectedColumns3, tableEntity3.columns()); + + // 4. Update column default value + Table alteredTable4 = + tableOperationDispatcher.alterTable( + tableIdent, + TableChange.updateColumnDefaultValue( + new String[] {"col4"}, Literals.stringLiteral("5"))); + + Column[] expectedColumns4 = + new Column[] { + TestColumn.builder() + .withName("col4") + .withType(Types.StringType.get()) + .withComment("comment4") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("5")) + .build() + }; + testColumns(expectedColumns4, alteredTable4.columns()); + + TableEntity tableEntity4 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(expectedColumns4, tableEntity4.columns()); + + // 5. 
Update column type + Table alteredTable5 = + tableOperationDispatcher.alterTable( + tableIdent, + TableChange.updateColumnType(new String[] {"col4"}, Types.IntegerType.get())); + + Column[] expectedColumns5 = + new Column[] { + TestColumn.builder() + .withName("col4") + .withType(Types.IntegerType.get()) + .withComment("comment4") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("5")) + .build() + }; + + testColumns(expectedColumns5, alteredTable5.columns()); + + TableEntity tableEntity5 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(expectedColumns5, tableEntity5.columns()); + + // 6. Update column comment + Table alteredTable6 = + tableOperationDispatcher.alterTable( + tableIdent, TableChange.updateColumnComment(new String[] {"col4"}, "new comment")); + + Column[] expectedColumns6 = + new Column[] { + TestColumn.builder() + .withName("col4") + .withType(Types.IntegerType.get()) + .withComment("new comment") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("5")) + .build() + }; + + testColumns(expectedColumns6, alteredTable6.columns()); + + TableEntity tableEntity6 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(expectedColumns6, tableEntity6.columns()); + + // 7. Update column nullable + Table alteredTable7 = + tableOperationDispatcher.alterTable( + tableIdent, TableChange.updateColumnNullability(new String[] {"col4"}, false)); + + Column[] expectedColumns7 = + new Column[] { + TestColumn.builder() + .withName("col4") + .withType(Types.IntegerType.get()) + .withComment("new comment") + .withNullable(false) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("5")) + .build() + }; + + testColumns(expectedColumns7, alteredTable7.columns()); + + TableEntity tableEntity7 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(expectedColumns7, tableEntity7.columns()); + + // 8. 
Update column auto increment + Table alteredTable8 = + tableOperationDispatcher.alterTable( + tableIdent, TableChange.updateColumnAutoIncrement(new String[] {"col4"}, false)); + + Column[] expectedColumns8 = + new Column[] { + TestColumn.builder() + .withName("col4") + .withType(Types.IntegerType.get()) + .withComment("new comment") + .withNullable(false) + .withAutoIncrement(false) + .withDefaultValue(Literals.stringLiteral("5")) + .build() + }; + + testColumns(expectedColumns8, alteredTable8.columns()); + + TableEntity tableEntity8 = entityStore.get(tableIdent, TABLE, TableEntity.class); + testColumnAndColumnEntities(expectedColumns8, tableEntity8.columns()); + } + + @Test + public void testCreateAndDropTableWithColumn() throws IOException { + Namespace tableNs = Namespace.of(metalake, catalog, "schema111"); + Map<String, String> props = ImmutableMap.of("k1", "v1", "k2", "v2"); + schemaOperationDispatcher.createSchema(NameIdentifier.of(tableNs.levels()), "comment", props); + + NameIdentifier tableIdent = NameIdentifier.of(tableNs, "table61"); + Column[] columns = + new Column[] { + TestColumn.builder() + .withName("col1") + .withType(Types.StringType.get()) + .withComment("comment1") + .withNullable(true) + .withAutoIncrement(true) + .withDefaultValue(Literals.stringLiteral("1")) + .build(), + TestColumn.builder() + .withName("col2") + .withType(Types.StringType.get()) + .withComment("comment2") + .withNullable(false) + .withAutoIncrement(false) + .withDefaultValue(Literals.stringLiteral("2")) + .build() + }; + + Table table1 = + tableOperationDispatcher.createTable( + tableIdent, columns, "comment", props, new Transform[0]); + testColumns(columns, table1.columns()); + + // Delete table + boolean dropped = tableOperationDispatcher.dropTable(tableIdent); + Assertions.assertTrue(dropped); + Assertions.assertFalse(entityStore.exists(tableIdent, TABLE)); + } + + private static void testColumns(Column[] expectedColumns, Column[] actualColumns) { + Map<String, Column> expectedColumnMap = + expectedColumns == null + ? Collections.emptyMap() + : Arrays.stream(expectedColumns) + .collect(Collectors.toMap(c -> c.name().toLowerCase(), Function.identity())); + Map<String, Column> actualColumnMap = + actualColumns == null + ? Collections.emptyMap() + : Arrays.stream(actualColumns) + .collect(Collectors.toMap(Column::name, Function.identity())); + + Assertions.assertEquals(expectedColumnMap.size(), actualColumnMap.size()); + expectedColumnMap.forEach( + (name, expectedColumn) -> { + Column actualColumn = actualColumnMap.get(name); + Assertions.assertNotNull(actualColumn); + Assertions.assertEquals(expectedColumn.name().toLowerCase(), actualColumn.name()); + Assertions.assertEquals(expectedColumn.dataType(), actualColumn.dataType()); + Assertions.assertEquals(expectedColumn.comment(), actualColumn.comment()); + Assertions.assertEquals(expectedColumn.nullable(), actualColumn.nullable()); + Assertions.assertEquals(expectedColumn.autoIncrement(), actualColumn.autoIncrement()); + Assertions.assertEquals(expectedColumn.defaultValue(), actualColumn.defaultValue()); + }); + } + + private static void testColumnAndColumnEntities( + Column[] expectedColumns, List<ColumnEntity> columnEntities) { + Map<String, Column> expectedColumnMap = + expectedColumns == null + ? Collections.emptyMap() + : Arrays.stream(expectedColumns) + .collect(Collectors.toMap(Column::name, Function.identity())); + Map<String, ColumnEntity> actualColumnMap = + columnEntities == null + ? Collections.emptyMap() + : columnEntities.stream() + .collect(Collectors.toMap(ColumnEntity::name, Function.identity())); + + Assertions.assertEquals(expectedColumnMap.size(), actualColumnMap.size()); + expectedColumnMap.forEach( + (name, expectedColumn) -> { + ColumnEntity actualColumn = actualColumnMap.get(name); + Assertions.assertNotNull(actualColumn); + Assertions.assertEquals(expectedColumn.name(), actualColumn.name()); + Assertions.assertEquals(expectedColumn.dataType(), actualColumn.dataType()); + Assertions.assertEquals(expectedColumn.comment(), actualColumn.comment()); + Assertions.assertEquals(expectedColumn.nullable(), actualColumn.nullable()); + Assertions.assertEquals(expectedColumn.autoIncrement(), actualColumn.autoIncrement()); + Assertions.assertEquals(expectedColumn.defaultValue(), actualColumn.defaultValue()); + }); + } } diff --git a/core/src/test/java/org/apache/gravitino/connector/TestCatalogOperations.java b/core/src/test/java/org/apache/gravitino/connector/TestCatalogOperations.java index d6bbd81c344..13c4652058a 100644 --- a/core/src/test/java/org/apache/gravitino/connector/TestCatalogOperations.java +++ b/core/src/test/java/org/apache/gravitino/connector/TestCatalogOperations.java @@ -23,14 +23,18 @@ import java.io.File; import java.io.IOException; import java.time.Instant; +import java.util.Arrays; import java.util.HashMap; import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.apache.gravitino.Catalog; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; import org.apache.gravitino.Schema; import org.apache.gravitino.SchemaChange; +import org.apache.gravitino.TestColumn; import org.apache.gravitino.TestFileset; import org.apache.gravitino.TestSchema; import org.apache.gravitino.TestTable; @@ -198,17 +202,24 @@ public Table alterTable(NameIdentifier ident, TableChange... 
changes) throw new TableAlreadyExistsException("Table %s already exists", ident); } } else { - throw new IllegalArgumentException("Unsupported table change: " + change); + // do nothing } } + TableChange.ColumnChange[] columnChanges = + Arrays.stream(changes) + .filter(change -> change instanceof TableChange.ColumnChange) + .map(change -> (TableChange.ColumnChange) change) + .toArray(TableChange.ColumnChange[]::new); + Column[] newColumns = updateColumns(table.columns(), columnChanges); + TestTable updatedTable = TestTable.builder() .withName(newIdent.name()) .withComment(table.comment()) .withProperties(new HashMap<>(newProps)) .withAuditInfo(updatedAuditInfo) - .withColumns(table.columns()) + .withColumns(newColumns) .withPartitioning(table.partitioning()) .withDistribution(table.distribution()) .withSortOrders(table.sortOrder()) @@ -634,4 +645,126 @@ private boolean checkSingleFile(Fileset fileset) { return false; } } + + private Column[] updateColumns(Column[] columns, TableChange.ColumnChange[] columnChanges) { + Map columnMap = + Arrays.stream(columns).collect(Collectors.toMap(Column::name, Function.identity())); + + for (TableChange.ColumnChange columnChange : columnChanges) { + if (columnChange instanceof TableChange.AddColumn) { + TableChange.AddColumn addColumn = (TableChange.AddColumn) columnChange; + TestColumn column = + TestColumn.builder() + .withName(String.join(".", addColumn.fieldName())) + .withComment(addColumn.getComment()) + .withType(addColumn.getDataType()) + .withNullable(addColumn.isNullable()) + .withAutoIncrement(addColumn.isAutoIncrement()) + .withDefaultValue(addColumn.getDefaultValue()) + .build(); + columnMap.put(column.name(), column); + + } else if (columnChange instanceof TableChange.DeleteColumn) { + columnMap.remove(String.join(".", columnChange.fieldName())); + + } else if (columnChange instanceof TableChange.RenameColumn) { + String oldName = String.join(".", columnChange.fieldName()); + String newName = ((TableChange.RenameColumn) columnChange).getNewName(); + Column column = columnMap.remove(oldName); + TestColumn newColumn = + TestColumn.builder() + .withName(newName) + .withComment(column.comment()) + .withType(column.dataType()) + .withNullable(column.nullable()) + .withAutoIncrement(column.autoIncrement()) + .withDefaultValue(column.defaultValue()) + .build(); + columnMap.put(newName, newColumn); + + } else if (columnChange instanceof TableChange.UpdateColumnDefaultValue) { + String columnName = String.join(".", columnChange.fieldName()); + TableChange.UpdateColumnDefaultValue updateColumnDefaultValue = + (TableChange.UpdateColumnDefaultValue) columnChange; + Column oldColumn = columnMap.get(columnName); + TestColumn newColumn = + TestColumn.builder() + .withName(columnName) + .withComment(oldColumn.comment()) + .withType(oldColumn.dataType()) + .withNullable(oldColumn.nullable()) + .withAutoIncrement(oldColumn.autoIncrement()) + .withDefaultValue(updateColumnDefaultValue.getNewDefaultValue()) + .build(); + columnMap.put(columnName, newColumn); + + } else if (columnChange instanceof TableChange.UpdateColumnType) { + String columnName = String.join(".", columnChange.fieldName()); + TableChange.UpdateColumnType updateColumnType = (TableChange.UpdateColumnType) columnChange; + Column oldColumn = columnMap.get(columnName); + TestColumn newColumn = + TestColumn.builder() + .withName(columnName) + .withComment(oldColumn.comment()) + .withType(updateColumnType.getNewDataType()) + .withNullable(oldColumn.nullable()) + 
.withAutoIncrement(oldColumn.autoIncrement()) + .withDefaultValue(oldColumn.defaultValue()) + .build(); + columnMap.put(columnName, newColumn); + + } else if (columnChange instanceof TableChange.UpdateColumnComment) { + String columnName = String.join(".", columnChange.fieldName()); + TableChange.UpdateColumnComment updateColumnComment = + (TableChange.UpdateColumnComment) columnChange; + Column oldColumn = columnMap.get(columnName); + TestColumn newColumn = + TestColumn.builder() + .withName(columnName) + .withComment(updateColumnComment.getNewComment()) + .withType(oldColumn.dataType()) + .withNullable(oldColumn.nullable()) + .withAutoIncrement(oldColumn.autoIncrement()) + .withDefaultValue(oldColumn.defaultValue()) + .build(); + columnMap.put(columnName, newColumn); + + } else if (columnChange instanceof TableChange.UpdateColumnNullability) { + String columnName = String.join(".", columnChange.fieldName()); + TableChange.UpdateColumnNullability updateColumnNullable = + (TableChange.UpdateColumnNullability) columnChange; + Column oldColumn = columnMap.get(columnName); + TestColumn newColumn = + TestColumn.builder() + .withName(columnName) + .withComment(oldColumn.comment()) + .withType(oldColumn.dataType()) + .withNullable(updateColumnNullable.nullable()) + .withAutoIncrement(oldColumn.autoIncrement()) + .withDefaultValue(oldColumn.defaultValue()) + .build(); + columnMap.put(columnName, newColumn); + + } else if (columnChange instanceof TableChange.UpdateColumnAutoIncrement) { + String columnName = String.join(".", columnChange.fieldName()); + TableChange.UpdateColumnAutoIncrement updateColumnAutoIncrement = + (TableChange.UpdateColumnAutoIncrement) columnChange; + Column oldColumn = columnMap.get(columnName); + TestColumn newColumn = + TestColumn.builder() + .withName(columnName) + .withComment(oldColumn.comment()) + .withType(oldColumn.dataType()) + .withNullable(oldColumn.nullable()) + .withAutoIncrement(updateColumnAutoIncrement.isAutoIncrement()) + .withDefaultValue(oldColumn.defaultValue()) + .build(); + columnMap.put(columnName, newColumn); + + } else { + // do nothing + } + } + return columnMap.values().toArray(new Column[0]); + } } diff --git a/core/src/test/java/org/apache/gravitino/credential/DummyCredentialProvider.java b/core/src/test/java/org/apache/gravitino/credential/DummyCredentialProvider.java new file mode 100644 index 00000000000..864635e96c9 --- /dev/null +++ b/core/src/test/java/org/apache/gravitino/credential/DummyCredentialProvider.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.gravitino.credential;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import java.util.Map;
+import java.util.Set;
+import lombok.Getter;
+
+public class DummyCredentialProvider implements CredentialProvider {
+  Map<String, String> properties;
+  static final String CREDENTIAL_TYPE = "dummy";
+
+  @Override
+  public void initialize(Map<String, String> properties) {
+    this.properties = properties;
+  }
+
+  @Override
+  public void close() {}
+
+  @Override
+  public String credentialType() {
+    return CREDENTIAL_TYPE;
+  }
+
+  @Override
+  public Credential getCredential(CredentialContext context) {
+    Preconditions.checkArgument(
+        context instanceof PathBasedCredentialContext
+            || context instanceof CatalogCredentialContext,
+        "Unsupported context: " + context.getClass().getSimpleName());
+    if (context instanceof PathBasedCredentialContext) {
+      return new DummyCredential((PathBasedCredentialContext) context);
+    }
+    return null;
+  }
+
+  public static class DummyCredential implements Credential {
+
+    @Getter private Set<String> writeLocations;
+    @Getter private Set<String> readLocations;
+
+    public DummyCredential(PathBasedCredentialContext locationContext) {
+      this.writeLocations = locationContext.getWritePaths();
+      this.readLocations = locationContext.getReadPaths();
+    }
+
+    @Override
+    public String credentialType() {
+      return DummyCredentialProvider.CREDENTIAL_TYPE;
+    }
+
+    @Override
+    public long expireTimeInMs() {
+      return 0;
+    }
+
+    @Override
+    public Map<String, String> credentialInfo() {
+      return ImmutableMap.of(
+          "writeLocation", writeLocations.toString(), "readLocation", readLocations.toString());
+    }
+  }
+}
diff --git a/core/src/test/java/org/apache/gravitino/credential/TestCredentialProvider.java b/core/src/test/java/org/apache/gravitino/credential/TestCredentialProvider.java
new file mode 100644
index 00000000000..b419375b136
--- /dev/null
+++ b/core/src/test/java/org/apache/gravitino/credential/TestCredentialProvider.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.credential;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import java.util.Map;
+import org.apache.gravitino.credential.DummyCredentialProvider.DummyCredential;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+public class TestCredentialProvider {
+  @Test
+  void testCredentialProvider() {
+    Map<String, String> catalogProperties = ImmutableMap.of("a", "b");
+    CredentialProvider credentialProvider =
+        CredentialProviderFactory.create(
+            DummyCredentialProvider.CREDENTIAL_TYPE, catalogProperties);
+    Assertions.assertEquals(
+        DummyCredentialProvider.CREDENTIAL_TYPE, credentialProvider.credentialType());
+    Assertions.assertTrue(credentialProvider instanceof DummyCredentialProvider);
+    DummyCredentialProvider dummyCredentialProvider = (DummyCredentialProvider) credentialProvider;
+    Assertions.assertEquals(catalogProperties, dummyCredentialProvider.properties);
+
+    ImmutableSet<String> writeLocations = ImmutableSet.of("location1");
+    ImmutableSet<String> readLocations = ImmutableSet.of("location2");
+
+    PathBasedCredentialContext locationContext =
+        new PathBasedCredentialContext("user", writeLocations, readLocations);
+    Credential credential = dummyCredentialProvider.getCredential(locationContext);
+    Assertions.assertTrue(credential instanceof DummyCredential);
+    DummyCredential dummyCredential = (DummyCredential) credential;
+
+    Assertions.assertEquals(writeLocations, dummyCredential.getWriteLocations());
+    Assertions.assertEquals(readLocations, dummyCredential.getReadLocations());
+  }
+}
diff --git a/core/src/test/java/org/apache/gravitino/listener/DummyEventListener.java b/core/src/test/java/org/apache/gravitino/listener/DummyEventListener.java
index 17e3e424963..4ec7ab71523 100644
--- a/core/src/test/java/org/apache/gravitino/listener/DummyEventListener.java
+++ b/core/src/test/java/org/apache/gravitino/listener/DummyEventListener.java
@@ -24,14 +24,17 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import lombok.Getter;
+import org.apache.gravitino.exceptions.ForbiddenException;
 import org.apache.gravitino.listener.api.EventListenerPlugin;
 import org.apache.gravitino.listener.api.event.Event;
+import org.apache.gravitino.listener.api.event.PreEvent;
 import org.awaitility.Awaitility;
 import org.junit.jupiter.api.Assertions;

 public class DummyEventListener implements EventListenerPlugin {
   Map<String, String> properties;
-  @Getter LinkedList<Event> events = new LinkedList<>();
+  @Getter LinkedList<Event> postEvents = new LinkedList<>();
+  @Getter LinkedList<PreEvent> preEvents = new LinkedList<>();

   @Override
   public void init(Map<String, String> properties) {
@@ -46,7 +49,17 @@ public void stop() {}

   @Override
   public void onPostEvent(Event event) {
-    this.events.add(event);
+    postEvents.add(event);
+  }
+
+  @Override
+  public void onPreEvent(PreEvent preEvent) {
+    if (preEvent.equals(TestEventListenerManager.DUMMY_FORBIDDEN_PRE_EVENT_INSTANCE)) {
+      throw new ForbiddenException("");
+    } else if (preEvent.equals(TestEventListenerManager.DUMMY_EXCEPTION_PRE_EVENT_INSTANCE)) {
+      throw new RuntimeException("");
+    }
+    preEvents.add(preEvent);
   }

   @Override
@@ -54,18 +67,26 @@ public Mode mode() {
     return Mode.SYNC;
   }

-  public Event popEvent() {
-    Assertions.assertTrue(events.size() > 0, "No events to pop");
-    return events.removeLast();
+  public Event popPostEvent() {
+    Assertions.assertTrue(postEvents.size() > 0, "No events to pop");
+    return postEvents.removeLast();
   }

   public static class
DummyAsyncEventListener extends DummyEventListener { - public List tryGetEvents() { + public List tryGetPostEvents() { + Awaitility.await() + .atMost(20, TimeUnit.SECONDS) + .pollInterval(10, TimeUnit.MILLISECONDS) + .until(() -> getPostEvents().size() > 0); + return getPostEvents(); + } + + public List tryGetPreEvents() { Awaitility.await() .atMost(20, TimeUnit.SECONDS) .pollInterval(10, TimeUnit.MILLISECONDS) - .until(() -> getEvents().size() > 0); - return getEvents(); + .until(() -> getPreEvents().size() > 0); + return getPreEvents(); } @Override diff --git a/core/src/test/java/org/apache/gravitino/listener/TestEventListenerManager.java b/core/src/test/java/org/apache/gravitino/listener/TestEventListenerManager.java index d0dda8878f9..fd7a612726f 100644 --- a/core/src/test/java/org/apache/gravitino/listener/TestEventListenerManager.java +++ b/core/src/test/java/org/apache/gravitino/listener/TestEventListenerManager.java @@ -26,22 +26,42 @@ import java.util.Set; import java.util.stream.Collectors; import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.listener.DummyEventListener.DummyAsyncEventListener; import org.apache.gravitino.listener.DummyEventListener.DummyAsyncIsolatedEventListener; import org.apache.gravitino.listener.api.EventListenerPlugin; import org.apache.gravitino.listener.api.event.Event; +import org.apache.gravitino.listener.api.event.PreEvent; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; public class TestEventListenerManager { - static class DummyEvent extends Event { - protected DummyEvent(String user, NameIdentifier identifier) { + + static class DummyPostEvent extends Event { + + protected DummyPostEvent(String user, NameIdentifier identifier) { super(user, identifier); } } - private static final DummyEvent DUMMY_EVENT_INSTANCE = - new DummyEvent("user", NameIdentifier.of("a", "b")); + static class DummyPreEvent extends PreEvent { + + protected DummyPreEvent(String user, NameIdentifier identifier) { + super(user, identifier); + } + } + + private static final DummyPostEvent DUMMY_POST_EVENT_INSTANCE = + new DummyPostEvent("user", NameIdentifier.of("a", "b")); + + private static final DummyPreEvent DUMMY_PRE_EVENT_INSTANCE = + new DummyPreEvent("user2", NameIdentifier.of("a2", "b2")); + + public static final DummyPreEvent DUMMY_FORBIDDEN_PRE_EVENT_INSTANCE = + new DummyPreEvent("user3", NameIdentifier.of("a3", "b3")); + + public static final DummyPreEvent DUMMY_EXCEPTION_PRE_EVENT_INSTANCE = + new DummyPreEvent("user4", NameIdentifier.of("a4", "b4")); @Test void testSyncListener() { @@ -54,9 +74,10 @@ void testSyncListener() { eventListenerManager.start(); EventBus eventBus = eventListenerManager.createEventBus(); - eventBus.dispatchEvent(DUMMY_EVENT_INSTANCE); - List listeners = eventBus.getPostEventListeners(); + // test post event + eventBus.dispatchEvent(DUMMY_POST_EVENT_INSTANCE); + List listeners = eventBus.getEventListeners(); Assertions.assertEquals(2, listeners.size()); Set names = listeners.stream() @@ -66,7 +87,27 @@ void testSyncListener() { EventListenerPluginWrapper wrapper = (EventListenerPluginWrapper) listener; EventListenerPlugin userListener = wrapper.getUserEventListener(); Assertions.assertTrue(userListener instanceof DummyEventListener); - checkEvents(((DummyEventListener) userListener).getEvents()); + checkPostEvents(((DummyEventListener) userListener).getPostEvents()); + Assertions.assertEquals( + 0, ((DummyEventListener) 
userListener).getPreEvents().size()); + return ((DummyEventListener) userListener).properties.get("name"); + }) + .collect(Collectors.toSet()); + Assertions.assertEquals(ImmutableSet.of(sync1, sync2), names); + + // test pre event + eventBus.dispatchEvent(DUMMY_PRE_EVENT_INSTANCE); + names = + listeners.stream() + .map( + listener -> { + Assertions.assertTrue(listener instanceof EventListenerPluginWrapper); + EventListenerPluginWrapper wrapper = (EventListenerPluginWrapper) listener; + EventListenerPlugin userListener = wrapper.getUserEventListener(); + Assertions.assertTrue(userListener instanceof DummyEventListener); + checkPreEvents(((DummyEventListener) userListener).getPreEvents()); + Assertions.assertEquals( + 0, ((DummyEventListener) userListener).getPostEvents().size()); return ((DummyEventListener) userListener).properties.get("name"); }) .collect(Collectors.toSet()); @@ -84,10 +125,11 @@ void testSharedAsyncListeners() { EventListenerManager eventListenerManager = new EventListenerManager(); eventListenerManager.init(properties); eventListenerManager.start(); - EventBus eventBus = eventListenerManager.createEventBus(); - eventBus.dispatchEvent(DUMMY_EVENT_INSTANCE); - List listeners = eventBus.getPostEventListeners(); + + // Test post event + eventBus.dispatchEvent(DUMMY_POST_EVENT_INSTANCE); + List listeners = eventBus.getEventListeners(); Assertions.assertEquals(1, listeners.size()); Assertions.assertTrue(listeners.get(0) instanceof AsyncQueueListener); @@ -102,12 +144,27 @@ void testSharedAsyncListeners() { EventListenerPlugin userListener = ((EventListenerPluginWrapper) shareQueueListener).getUserEventListener(); Assertions.assertTrue(userListener instanceof DummyAsyncEventListener); - checkEvents(((DummyAsyncEventListener) userListener).tryGetEvents()); + checkPostEvents(((DummyAsyncEventListener) userListener).tryGetPostEvents()); + Assertions.assertEquals( + 0, ((DummyAsyncEventListener) userListener).getPreEvents().size()); return ((DummyAsyncEventListener) userListener).properties.get("name"); }) .collect(Collectors.toSet()); Assertions.assertEquals(ImmutableSet.of(async1, async2), sharedQueueListenerNames); + // Test pre event + eventBus.dispatchEvent(DUMMY_PRE_EVENT_INSTANCE); + shareQueueListeners.forEach( + shareQueueListener -> { + Assertions.assertTrue(shareQueueListener instanceof EventListenerPluginWrapper); + EventListenerPlugin userListener = + ((EventListenerPluginWrapper) shareQueueListener).getUserEventListener(); + Assertions.assertTrue(userListener instanceof DummyAsyncEventListener); + checkPreEvents(((DummyAsyncEventListener) userListener).tryGetPreEvents()); + Assertions.assertEquals( + 0, ((DummyAsyncEventListener) userListener).getPostEvents().size()); + }); + eventListenerManager.stop(); } @@ -122,8 +179,8 @@ void testIsolatedAsyncListeners() { eventListenerManager.start(); EventBus eventBus = eventListenerManager.createEventBus(); - eventBus.dispatchEvent(DUMMY_EVENT_INSTANCE); - List listeners = eventBus.getPostEventListeners(); + eventBus.dispatchEvent(DUMMY_POST_EVENT_INSTANCE); + List listeners = eventBus.getEventListeners(); Assertions.assertEquals(2, listeners.size()); Set isolatedListenerNames = @@ -141,12 +198,49 @@ void testIsolatedAsyncListeners() { ((EventListenerPluginWrapper) internalListeners.get(0)) .getUserEventListener(); Assertions.assertTrue(userListener instanceof DummyAsyncEventListener); - checkEvents(((DummyAsyncEventListener) userListener).tryGetEvents()); + checkPostEvents(((DummyAsyncEventListener) 
userListener).tryGetPostEvents()); + Assertions.assertEquals( + 0, ((DummyAsyncEventListener) userListener).getPreEvents().size()); return ((DummyAsyncEventListener) userListener).properties.get("name"); }) .collect(Collectors.toSet()); Assertions.assertEquals(ImmutableSet.of(async1, async2), isolatedListenerNames); + eventBus.dispatchEvent(DUMMY_PRE_EVENT_INSTANCE); + listeners.forEach( + listener -> { + Assertions.assertTrue(listener instanceof AsyncQueueListener); + AsyncQueueListener asyncQueueListener = (AsyncQueueListener) listener; + List internalListeners = asyncQueueListener.getEventListeners(); + Assertions.assertEquals(1, internalListeners.size()); + Assertions.assertTrue(internalListeners.get(0) instanceof EventListenerPluginWrapper); + EventListenerPlugin userListener = + ((EventListenerPluginWrapper) internalListeners.get(0)).getUserEventListener(); + Assertions.assertTrue(userListener instanceof DummyAsyncEventListener); + checkPreEvents(((DummyAsyncEventListener) userListener).tryGetPreEvents()); + Assertions.assertEquals( + 0, ((DummyAsyncEventListener) userListener).getPostEvents().size()); + }); + + eventListenerManager.stop(); + } + + @Test + void testForbiddenPreEvent() { + String sync1 = "sync1"; + String sync2 = "sync2"; + Map properties = createSyncEventListenerConfig(sync1, sync2); + + EventListenerManager eventListenerManager = new EventListenerManager(); + eventListenerManager.init(properties); + eventListenerManager.start(); + + EventBus eventBus = eventListenerManager.createEventBus(); + + Assertions.assertThrowsExactly( + ForbiddenException.class, () -> eventBus.dispatchEvent(DUMMY_FORBIDDEN_PRE_EVENT_INSTANCE)); + + Assertions.assertDoesNotThrow(() -> eventBus.dispatchEvent(DUMMY_EXCEPTION_PRE_EVENT_INSTANCE)); eventListenerManager.stop(); } @@ -206,8 +300,15 @@ private Map createSyncEventListenerConfig(String sync1, String s return config; } - private void checkEvents(List events) { + private void checkPostEvents(List events) { + Assertions.assertEquals(1, events.size()); + Assertions.assertEquals(DUMMY_POST_EVENT_INSTANCE, events.get(0)); + events.clear(); + } + + private void checkPreEvents(List events) { Assertions.assertEquals(1, events.size()); - Assertions.assertEquals(DUMMY_EVENT_INSTANCE, events.get(0)); + Assertions.assertEquals(DUMMY_PRE_EVENT_INSTANCE, events.get(0)); + events.clear(); } } diff --git a/core/src/test/java/org/apache/gravitino/listener/api/event/TestCatalogEvent.java b/core/src/test/java/org/apache/gravitino/listener/api/event/TestCatalogEvent.java index ae5407329b0..d2050894368 100644 --- a/core/src/test/java/org/apache/gravitino/listener/api/event/TestCatalogEvent.java +++ b/core/src/test/java/org/apache/gravitino/listener/api/event/TestCatalogEvent.java @@ -65,7 +65,7 @@ void testCreateCatalogEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", catalog.name()); dispatcher.createCatalog( identifier, catalog.type(), catalog.provider(), catalog.comment(), catalog.properties()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateCatalogEvent.class, event.getClass()); CatalogInfo catalogInfo = ((CreateCatalogEvent) event).createdCatalogInfo(); @@ -76,7 +76,7 @@ void testCreateCatalogEvent() { void testLoadCatalogEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", catalog.name()); dispatcher.loadCatalog(identifier); - Event event = dummyEventListener.popEvent(); + Event 
event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadCatalogEvent.class, event.getClass()); CatalogInfo catalogInfo = ((LoadCatalogEvent) event).loadedCatalogInfo(); @@ -88,7 +88,7 @@ void testAlterCatalogEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", catalog.name()); CatalogChange catalogChange = CatalogChange.setProperty("a", "b"); dispatcher.alterCatalog(identifier, catalogChange); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterCatalogEvent.class, event.getClass()); CatalogInfo catalogInfo = ((AlterCatalogEvent) event).updatedCatalogInfo(); @@ -102,7 +102,7 @@ void testAlterCatalogEvent() { void testDropCatalogEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", catalog.name()); dispatcher.dropCatalog(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropCatalogEvent.class, event.getClass()); Assertions.assertEquals(true, ((DropCatalogEvent) event).isExists()); @@ -112,7 +112,7 @@ void testDropCatalogEvent() { void testListCatalogEvent() { Namespace namespace = Namespace.of("metalake"); dispatcher.listCatalogs(namespace); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListCatalogEvent.class, event.getClass()); Assertions.assertEquals(namespace, ((ListCatalogEvent) event).namespace()); @@ -122,7 +122,7 @@ void testListCatalogEvent() { void testListCatalogInfoEvent() { Namespace namespace = Namespace.of("metalake"); dispatcher.listCatalogsInfo(namespace); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListCatalogEvent.class, event.getClass()); Assertions.assertEquals(namespace, ((ListCatalogEvent) event).namespace()); @@ -140,7 +140,7 @@ void testCreateCatalogFailureEvent() { catalog.provider(), catalog.comment(), catalog.properties())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateCatalogFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -154,7 +154,7 @@ void testLoadCatalogFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.loadCatalog(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadCatalogFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -168,7 +168,7 @@ void testAlterCatalogFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.alterCatalog(identifier, catalogChange)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterCatalogFailureEvent.class, event.getClass()); 
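+ // each *FailureEvent is expected to record the class of the exception thrown by the dispatcher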
Assertions.assertEquals( @@ -183,7 +183,7 @@ void testDropCatalogFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.dropCatalog(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropCatalogFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -195,7 +195,7 @@ void testListCatalogFailureEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listCatalogs(namespace)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(ListCatalogFailureEvent.class, event.getClass()); Assertions.assertEquals( GravitinoRuntimeException.class, ((ListCatalogFailureEvent) event).exception().getClass()); @@ -207,7 +207,7 @@ void testListCatalogInfoFailureEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listCatalogsInfo(namespace)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(ListCatalogFailureEvent.class, event.getClass()); Assertions.assertEquals( GravitinoRuntimeException.class, ((ListCatalogFailureEvent) event).exception().getClass()); diff --git a/core/src/test/java/org/apache/gravitino/listener/api/event/TestFilesetEvent.java b/core/src/test/java/org/apache/gravitino/listener/api/event/TestFilesetEvent.java index efc073b1979..3210887117b 100644 --- a/core/src/test/java/org/apache/gravitino/listener/api/event/TestFilesetEvent.java +++ b/core/src/test/java/org/apache/gravitino/listener/api/event/TestFilesetEvent.java @@ -75,7 +75,7 @@ void testCreateFilesetEvent() { fileset.type(), fileset.storageLocation(), fileset.properties()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateFilesetEvent.class, event.getClass()); FilesetInfo filesetInfo = ((CreateFilesetEvent) event).createdFilesetInfo(); @@ -86,7 +86,7 @@ void testCreateFilesetEvent() { void testLoadFilesetEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", fileset.name()); dispatcher.loadFileset(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadFilesetEvent.class, event.getClass()); FilesetInfo filesetInfo = ((LoadFilesetEvent) event).loadedFilesetInfo(); @@ -98,7 +98,7 @@ void testAlterFilesetEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", fileset.name()); FilesetChange change = FilesetChange.setProperty("a", "b"); dispatcher.alterFileset(identifier, change); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterFilesetEvent.class, event.getClass()); FilesetInfo filesetInfo = ((AlterFilesetEvent) event).updatedFilesetInfo(); @@ -111,7 +111,7 @@ void testAlterFilesetEvent() { void testDropFilesetEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", 
"catalog", fileset.name()); dispatcher.dropFileset(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropFilesetEvent.class, event.getClass()); Assertions.assertTrue(((DropFilesetEvent) event).isExists()); @@ -121,7 +121,7 @@ void testDropFilesetEvent() { void testListFilesetEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); dispatcher.listFilesets(namespace); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListFilesetEvent.class, event.getClass()); Assertions.assertEquals(namespace, ((ListFilesetEvent) event).namespace()); @@ -136,7 +136,7 @@ void testGetFileLocationEvent() { fileset.type(), fileset.storageLocation(), fileset.properties()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateFilesetEvent.class, event.getClass()); FilesetInfo filesetInfo = ((CreateFilesetEvent) event).createdFilesetInfo(); @@ -152,7 +152,7 @@ void testGetFileLocationEvent() { CallerContext callerContext = CallerContext.builder().withContext(contextMap).build(); CallerContext.CallerContextHolder.set(callerContext); String fileLocation = dispatcher.getFileLocation(identifier, "test"); - Event event1 = dummyEventListener.popEvent(); + Event event1 = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event1.identifier()); Assertions.assertEquals(GetFileLocationEvent.class, event1.getClass()); String actualFileLocation = ((GetFileLocationEvent) event1).actualFileLocation(); @@ -180,7 +180,7 @@ void testCreateSchemaFailureEvent() { fileset.type(), fileset.storageLocation(), fileset.properties())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateFilesetFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -194,7 +194,7 @@ void testLoadFilesetFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "fileset"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.loadFileset(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadFilesetFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -207,7 +207,7 @@ void testAlterFilesetFailureEvent() { FilesetChange change = FilesetChange.setProperty("a", "b"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.alterFileset(identifier, change)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterFilesetFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -221,7 +221,7 @@ void testDropFilesetFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "fileset"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.dropFileset(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = 
dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropFilesetFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -233,7 +233,7 @@ void testListFilesetFailureEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listFilesets(namespace)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListFilesetFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -247,7 +247,7 @@ void testGetFileLocationFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.getFileLocation(identifier, "/test")); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(GetFileLocationFailureEvent.class, event.getClass()); Assertions.assertEquals( diff --git a/core/src/test/java/org/apache/gravitino/listener/api/event/TestMetalakeEvent.java b/core/src/test/java/org/apache/gravitino/listener/api/event/TestMetalakeEvent.java index a31ce933890..319ac641f61 100644 --- a/core/src/test/java/org/apache/gravitino/listener/api/event/TestMetalakeEvent.java +++ b/core/src/test/java/org/apache/gravitino/listener/api/event/TestMetalakeEvent.java @@ -63,7 +63,7 @@ void init() { void testCreateMetalakeEvent() { NameIdentifier identifier = NameIdentifier.of("metalake"); dispatcher.createMetalake(identifier, metalake.comment(), metalake.properties()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateMetalakeEvent.class, event.getClass()); MetalakeInfo metalakeInfo = ((CreateMetalakeEvent) event).createdMetalakeInfo(); @@ -74,7 +74,7 @@ void testCreateMetalakeEvent() { void testLoadMetalakeEvent() { NameIdentifier identifier = NameIdentifier.of("metalake"); dispatcher.loadMetalake(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadMetalakeEvent.class, event.getClass()); MetalakeInfo metalakeInfo = ((LoadMetalakeEvent) event).loadedMetalakeInfo(); @@ -86,7 +86,7 @@ void testAlterMetalakeEvent() { NameIdentifier identifier = NameIdentifier.of("metalake"); MetalakeChange metalakeChange = MetalakeChange.setProperty("a", "b"); dispatcher.alterMetalake(identifier, metalakeChange); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterMetalakeEvent.class, event.getClass()); MetalakeInfo metalakeInfo = ((AlterMetalakeEvent) event).updatedMetalakeInfo(); @@ -100,7 +100,7 @@ void testAlterMetalakeEvent() { void testDropMetalakeEvent() { NameIdentifier identifier = NameIdentifier.of("metalake"); dispatcher.dropMetalake(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropMetalakeEvent.class, event.getClass()); Assertions.assertTrue(((DropMetalakeEvent) event).isExists()); @@ -109,7 +109,7 @@ void 
testDropMetalakeEvent() { @Test void testListMetalakeEvent() { dispatcher.listMetalakes(); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertNull(event.identifier()); Assertions.assertEquals(ListMetalakeEvent.class, event.getClass()); } @@ -122,7 +122,7 @@ void testCreateMetalakeFailureEvent() { () -> failureDispatcher.createMetalake( identifier, metalake.comment(), metalake.properties())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateMetalakeFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -136,7 +136,7 @@ void testLoadMetalakeFailureEvent() { NameIdentifier identifier = NameIdentifier.of(metalake.name()); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.loadMetalake(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadMetalakeFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -150,7 +150,7 @@ void testAlterMetalakeFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.alterMetalake(identifier, metalakeChange)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterMetalakeFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -166,7 +166,7 @@ void testDropMetalakeFailureEvent() { NameIdentifier identifier = NameIdentifier.of(metalake.name()); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.dropMetalake(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropMetalakeFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -177,7 +177,7 @@ void testDropMetalakeFailureEvent() { void testListMetalakeFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listMetalakes()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertNull(event.identifier()); Assertions.assertEquals(ListMetalakeFailureEvent.class, event.getClass()); Assertions.assertEquals( diff --git a/core/src/test/java/org/apache/gravitino/listener/api/event/TestPartitionEvent.java b/core/src/test/java/org/apache/gravitino/listener/api/event/TestPartitionEvent.java index 408330a4081..a1aa8aab2d0 100644 --- a/core/src/test/java/org/apache/gravitino/listener/api/event/TestPartitionEvent.java +++ b/core/src/test/java/org/apache/gravitino/listener/api/event/TestPartitionEvent.java @@ -110,7 +110,7 @@ void testCreatePartitionInfo() { void testAddPartitionEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema", "table"); dispatcher.addPartition(identifier, partition); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AddPartitionEvent.class, event.getClass()); PartitionInfo partitionInfo = ((AddPartitionEvent) event).createdPartitionInfo(); @@ -121,7 +121,7 @@ void 
testAddPartitionEvent() { void testDropPartitionEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema", "table"); dispatcher.dropPartition(identifier, partition.name()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropPartitionEvent.class, event.getClass()); Assertions.assertEquals(false, ((DropPartitionEvent) event).isExists()); @@ -131,7 +131,7 @@ void testDropPartitionEvent() { void testPartitionExistsEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema", "table"); dispatcher.partitionExists(identifier, partition.name()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(PartitionExistsEvent.class, event.getClass()); Assertions.assertEquals(false, ((PartitionExistsEvent) event).isExists()); @@ -141,7 +141,7 @@ void testPartitionExistsEvent() { void testListPartitionEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema", "table"); dispatcher.listPartitions(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(ListPartitionEvent.class, event.getClass()); Assertions.assertEquals(identifier, ((ListPartitionEvent) event).identifier()); @@ -151,7 +151,7 @@ void testListPartitionEvent() { void testListPartitionNamesEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema", "table"); dispatcher.listPartitionNames(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(ListPartitionNamesEvent.class, event.getClass()); Assertions.assertEquals(identifier, ((ListPartitionNamesEvent) event).identifier()); @@ -161,7 +161,7 @@ void testListPartitionNamesEvent() { void testPurgePartitionEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema", "table"); dispatcher.purgePartition(identifier, partition.name()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(PurgePartitionEvent.class, event.getClass()); Assertions.assertEquals(identifier, ((PurgePartitionEvent) event).identifier()); @@ -173,7 +173,7 @@ void testAddPartitionFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.addPartition(identifier, partition)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(AddPartitionFailureEvent.class, event.getClass()); Assertions.assertEquals( GravitinoRuntimeException.class, ((AddPartitionFailureEvent) event).exception().getClass()); @@ -187,7 +187,7 @@ void testDropPartitionFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.dropPartition(identifier, partition.name())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(DropPartitionFailureEvent.class, event.getClass()); Assertions.assertEquals( GravitinoRuntimeException.class, @@ -201,7 +201,7 @@ 
void testPartitionExistsFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.partitionExists(identifier, partition.name())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(PartitionExistsFailureEvent.class, event.getClass()); Assertions.assertEquals( GravitinoRuntimeException.class, @@ -214,7 +214,7 @@ void testListPartitionFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema", "table"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listPartitions(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(ListPartitionFailureEvent.class, event.getClass()); Assertions.assertEquals( GravitinoRuntimeException.class, @@ -227,7 +227,7 @@ void testListPartitionNamesFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema", "table"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listPartitionNames(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(ListPartitionNamesFailureEvent.class, event.getClass()); Assertions.assertEquals( GravitinoRuntimeException.class, @@ -241,7 +241,7 @@ void testPurgePartitionFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.purgePartition(identifier, partition.name())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(PurgePartitionFailureEvent.class, event.getClass()); Assertions.assertEquals( GravitinoRuntimeException.class, diff --git a/core/src/test/java/org/apache/gravitino/listener/api/event/TestSchemaEvent.java b/core/src/test/java/org/apache/gravitino/listener/api/event/TestSchemaEvent.java index d9af6a155b2..c2c0d7e4468 100644 --- a/core/src/test/java/org/apache/gravitino/listener/api/event/TestSchemaEvent.java +++ b/core/src/test/java/org/apache/gravitino/listener/api/event/TestSchemaEvent.java @@ -66,7 +66,7 @@ void init() { void testCreateSchemaEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema"); dispatcher.createSchema(identifier, "", ImmutableMap.of()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateSchemaEvent.class, event.getClass()); SchemaInfo schemaInfo = ((CreateSchemaEvent) event).createdSchemaInfo(); @@ -77,7 +77,7 @@ void testCreateSchemaEvent() { void testLoadSchemaEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema"); dispatcher.loadSchema(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadSchemaEvent.class, event.getClass()); SchemaInfo schemaInfo = ((LoadSchemaEvent) event).loadedSchemaInfo(); @@ -88,7 +88,7 @@ void testLoadSchemaEvent() { void testListSchemaEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); dispatcher.listSchemas(namespace); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(ListSchemaEvent.class, event.getClass()); 
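+ // list operations dispatch a single post event that exposes the namespace being listed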
Assertions.assertEquals(namespace, ((ListSchemaEvent) event).namespace()); } @@ -98,7 +98,7 @@ void testAlterSchemaEvent() { SchemaChange schemaChange = SchemaChange.setProperty("a", "b"); NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema"); dispatcher.alterSchema(identifier, schemaChange); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterSchemaEvent.class, event.getClass()); @@ -113,7 +113,7 @@ void testAlterSchemaEvent() { void testDropSchemaEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema"); dispatcher.dropSchema(identifier, true); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropSchemaEvent.class, event.getClass()); Assertions.assertEquals(true, ((DropSchemaEvent) event).cascade()); @@ -126,7 +126,7 @@ void testCreateSchemaFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.createSchema(identifier, schema.comment(), schema.properties())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateSchemaFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -139,7 +139,7 @@ void testLoadSchemaFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.loadSchema(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadSchemaFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -154,7 +154,7 @@ void testAlterSchemaFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.alterSchema(identifier, schemaChange)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterSchemaFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -168,7 +168,7 @@ void testDropSchemaFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "schema"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.dropSchema(identifier, true)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropSchemaFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -181,7 +181,7 @@ void testListSchemaFailureEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listSchemas(namespace)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListSchemaFailureEvent.class, event.getClass()); Assertions.assertEquals( diff --git a/core/src/test/java/org/apache/gravitino/listener/api/event/TestTableEvent.java 
b/core/src/test/java/org/apache/gravitino/listener/api/event/TestTableEvent.java index bf427f01f58..11507c34376 100644 --- a/core/src/test/java/org/apache/gravitino/listener/api/event/TestTableEvent.java +++ b/core/src/test/java/org/apache/gravitino/listener/api/event/TestTableEvent.java @@ -84,7 +84,7 @@ void testCreateTableEvent() { table.distribution(), table.sortOrder(), table.index()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateTableEvent.class, event.getClass()); TableInfo tableInfo = ((CreateTableEvent) event).createdTableInfo(); @@ -95,7 +95,7 @@ void testCreateTableEvent() { void testLoadTableEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", table.name()); dispatcher.loadTable(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadTableEvent.class, event.getClass()); TableInfo tableInfo = ((LoadTableEvent) event).loadedTableInfo(); @@ -107,7 +107,7 @@ void testAlterTableEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", table.name()); TableChange change = TableChange.setProperty("a", "b"); dispatcher.alterTable(identifier, change); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterTableEvent.class, event.getClass()); TableInfo tableInfo = ((AlterTableEvent) event).updatedTableInfo(); @@ -120,7 +120,7 @@ void testAlterTableEvent() { void testDropTableEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", table.name()); dispatcher.dropTable(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropTableEvent.class, event.getClass()); Assertions.assertEquals(true, ((DropTableEvent) event).isExists()); @@ -130,7 +130,7 @@ void testDropTableEvent() { void testPurgeTableEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", table.name()); dispatcher.purgeTable(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(PurgeTableEvent.class, event.getClass()); Assertions.assertEquals(true, ((PurgeTableEvent) event).isExists()); @@ -140,7 +140,7 @@ void testPurgeTableEvent() { void testListTableEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); dispatcher.listTables(namespace); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListTableEvent.class, event.getClass()); Assertions.assertEquals(namespace, ((ListTableEvent) event).namespace()); @@ -161,7 +161,7 @@ void testCreateTableFailureEvent() { table.distribution(), table.sortOrder(), table.index())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateTableFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -174,7 +174,7 @@ void 
testLoadTableFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "table", table.name()); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.loadTable(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadTableFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -187,7 +187,7 @@ void testAlterTableFailureEvent() { TableChange change = TableChange.setProperty("a", "b"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.alterTable(identifier, change)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterTableFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -201,7 +201,7 @@ void testDropTableFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "table", table.name()); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.dropTable(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropTableFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -213,7 +213,7 @@ void testPurgeTableFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "table", table.name()); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.purgeTable(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(PurgeTableFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -225,7 +225,7 @@ void testListTableFailureEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listTables(namespace)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListTableFailureEvent.class, event.getClass()); Assertions.assertEquals( diff --git a/core/src/test/java/org/apache/gravitino/listener/api/event/TestTopicEvent.java b/core/src/test/java/org/apache/gravitino/listener/api/event/TestTopicEvent.java index cf61006481b..268c628c51a 100644 --- a/core/src/test/java/org/apache/gravitino/listener/api/event/TestTopicEvent.java +++ b/core/src/test/java/org/apache/gravitino/listener/api/event/TestTopicEvent.java @@ -65,7 +65,7 @@ void init() { void testCreateTopicEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "topic"); dispatcher.createTopic(identifier, topic.comment(), null, topic.properties()); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateTopicEvent.class, event.getClass()); TopicInfo topicInfo = ((CreateTopicEvent) event).createdTopicInfo(); @@ -76,7 +76,7 @@ void testCreateTopicEvent() { void testLoadTopicEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "topic"); dispatcher.loadTopic(identifier); - 
Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadTopicEvent.class, event.getClass()); TopicInfo topicInfo = ((LoadTopicEvent) event).loadedTopicInfo(); @@ -88,7 +88,7 @@ void testAlterTopicEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "topic"); TopicChange topicChange = TopicChange.setProperty("a", "b"); dispatcher.alterTopic(identifier, topicChange); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterTopicEvent.class, event.getClass()); TopicInfo topicInfo = ((AlterTopicEvent) event).updatedTopicInfo(); @@ -101,7 +101,7 @@ void testAlterTopicEvent() { void testDropTopicEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "topic"); dispatcher.dropTopic(identifier); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(DropTopicEvent.class, event.getClass()); Assertions.assertEquals(true, ((DropTopicEvent) event).isExists()); @@ -111,7 +111,7 @@ void testDropTopicEvent() { void testListTopicEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); dispatcher.listTopics(namespace); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListTopicEvent.class, event.getClass()); Assertions.assertEquals(namespace, ((ListTopicEvent) event).namespace()); @@ -123,7 +123,7 @@ void testCreateTopicFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.createTopic(identifier, topic.comment(), null, topic.properties())); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(CreateTopicFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -136,7 +136,7 @@ void testLoadTopicFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "topic"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.loadTopic(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(LoadTopicFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -150,7 +150,7 @@ void testAlterTopicFailureEvent() { Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.alterTopic(identifier, topicChange)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, event.identifier()); Assertions.assertEquals(AlterTopicFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -164,7 +164,7 @@ void testDropTopicFailureEvent() { NameIdentifier identifier = NameIdentifier.of("metalake", "catalog", "topic"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.dropTopic(identifier)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(identifier, 
event.identifier()); Assertions.assertEquals(DropTopicFailureEvent.class, event.getClass()); Assertions.assertEquals( @@ -176,7 +176,7 @@ void testListTopicFailureEvent() { Namespace namespace = Namespace.of("metalake", "catalog"); Assertions.assertThrowsExactly( GravitinoRuntimeException.class, () -> failureDispatcher.listTopics(namespace)); - Event event = dummyEventListener.popEvent(); + Event event = dummyEventListener.popPostEvent(); Assertions.assertEquals(namespace.toString(), event.identifier().toString()); Assertions.assertEquals(ListTopicFailureEvent.class, event.getClass()); Assertions.assertEquals( diff --git a/core/src/test/java/org/apache/gravitino/meta/TestColumnEntity.java b/core/src/test/java/org/apache/gravitino/meta/TestColumnEntity.java index 2bae86a50bf..e7ad7e7d076 100644 --- a/core/src/test/java/org/apache/gravitino/meta/TestColumnEntity.java +++ b/core/src/test/java/org/apache/gravitino/meta/TestColumnEntity.java @@ -19,7 +19,10 @@ package org.apache.gravitino.meta; import java.time.Instant; +import java.util.Arrays; +import java.util.List; import org.apache.gravitino.Namespace; +import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.expressions.literals.Literals; import org.apache.gravitino.rel.types.Types; import org.junit.jupiter.api.Assertions; @@ -74,7 +77,7 @@ public void testColumnEntityFields() { .withAuditInfo( AuditInfo.builder().withCreator("test").withCreateTime(Instant.now()).build()) .build(); - Assertions.assertNull(columnEntity3.defaultValue()); + Assertions.assertEquals(Column.DEFAULT_VALUE_NOT_SET, columnEntity3.defaultValue()); } @Test @@ -175,7 +178,7 @@ public void testTableColumnEntity() { AuditInfo.builder().withCreator("test3").withCreateTime(Instant.now()).build()) .build(); - ColumnEntity[] columns = new ColumnEntity[] {columnEntity1, columnEntity2, columnEntity3}; + List columns = Arrays.asList(columnEntity1, columnEntity2, columnEntity3); TableEntity tableEntity = TableEntity.builder() .withId(1L) @@ -189,7 +192,7 @@ public void testTableColumnEntity() { Assertions.assertEquals(1L, tableEntity.id()); Assertions.assertEquals("test", tableEntity.name()); Assertions.assertEquals(Namespace.of("catalog", "schema"), tableEntity.namespace()); - Assertions.assertArrayEquals(columns, tableEntity.columns()); - Assertions.assertEquals(3, tableEntity.columns().length); + Assertions.assertEquals(columns, tableEntity.columns()); + Assertions.assertEquals(3, tableEntity.columns().size()); } } diff --git a/core/src/test/java/org/apache/gravitino/storage/TestEntityStorage.java b/core/src/test/java/org/apache/gravitino/storage/TestEntityStorage.java index 46bb5fb9337..6502b6931ea 100644 --- a/core/src/test/java/org/apache/gravitino/storage/TestEntityStorage.java +++ b/core/src/test/java/org/apache/gravitino/storage/TestEntityStorage.java @@ -42,6 +42,7 @@ import java.sql.Statement; import java.time.Instant; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.UUID; import org.apache.commons.io.FileUtils; @@ -65,10 +66,11 @@ import org.apache.gravitino.exceptions.NonEmptyEntityException; import org.apache.gravitino.file.Fileset; import org.apache.gravitino.integration.test.container.ContainerSuite; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.meta.AuditInfo; import org.apache.gravitino.meta.BaseMetalake; import org.apache.gravitino.meta.CatalogEntity; +import 
org.apache.gravitino.meta.ColumnEntity; import org.apache.gravitino.meta.FilesetEntity; import org.apache.gravitino.meta.GroupEntity; import org.apache.gravitino.meta.RoleEntity; @@ -77,6 +79,8 @@ import org.apache.gravitino.meta.TableEntity; import org.apache.gravitino.meta.TopicEntity; import org.apache.gravitino.meta.UserEntity; +import org.apache.gravitino.rel.types.Type; +import org.apache.gravitino.rel.types.Types; import org.apache.gravitino.storage.relational.converters.H2ExceptionConverter; import org.apache.gravitino.storage.relational.converters.MySQLExceptionConverter; import org.apache.gravitino.storage.relational.converters.PostgreSQLExceptionConverter; @@ -91,6 +95,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testcontainers.shaded.org.apache.commons.lang3.tuple.Pair; @Tag("gravitino-docker-test") public class TestEntityStorage { @@ -125,6 +130,7 @@ private void init(String type, Config config) { Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_PATH)).thenReturn(DB_DIR); Mockito.when(config.get(STORE_DELETE_AFTER_TIME)).thenReturn(20 * 60 * 1000L); Mockito.when(config.get(VERSION_RETENTION_COUNT)).thenReturn(1L); + BaseIT baseIT = new BaseIT(); try { if (type.equalsIgnoreCase("h2")) { @@ -141,7 +147,7 @@ private void init(String type, Config config) { SQLExceptionConverterFactory.class, "converter", new H2ExceptionConverter(), true); } else if (type.equalsIgnoreCase("mysql")) { - String mysqlJdbcUrl = AbstractIT.startAndInitMySQLBackend(); + String mysqlJdbcUrl = baseIT.startAndInitMySQLBackend(); Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_URL)).thenReturn(mysqlJdbcUrl); Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_USER)).thenReturn("root"); Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_PASSWORD)).thenReturn("root"); @@ -152,7 +158,7 @@ private void init(String type, Config config) { SQLExceptionConverterFactory.class, "converter", new MySQLExceptionConverter(), true); } else if (type.equalsIgnoreCase("postgresql")) { - String postgreSQLJdbcUrl = AbstractIT.startAndInitPGBackend(); + String postgreSQLJdbcUrl = baseIT.startAndInitPGBackend(); Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_URL)).thenReturn(postgreSQLJdbcUrl); Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_USER)).thenReturn("root"); Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_PASSWORD)).thenReturn("root"); @@ -570,9 +576,19 @@ void testEntityDelete(String type) throws IOException { SchemaEntity schema1 = createSchemaEntity(1L, Namespace.of("metalake", "catalog"), "schema1", auditInfo); + ColumnEntity column1 = + createColumnEntity( + RandomIdGenerator.INSTANCE.nextId(), "column1", Types.StringType.get(), auditInfo); + ColumnEntity column2 = + createColumnEntity( + RandomIdGenerator.INSTANCE.nextId(), "column2", Types.StringType.get(), auditInfo); TableEntity table1 = - createTableEntity( - 1L, Namespace.of("metalake", "catalog", "schema1"), "table1", auditInfo); + createTableEntityWithColumns( + 1L, + Namespace.of("metalake", "catalog", "schema1"), + "table1", + auditInfo, + Lists.newArrayList(column1, column2)); FilesetEntity fileset1 = createFilesetEntity( 1L, Namespace.of("metalake", "catalog", "schema1"), "fileset1", auditInfo); @@ -582,9 +598,19 @@ void testEntityDelete(String type) throws IOException { SchemaEntity schema2 = createSchemaEntity(2L, Namespace.of("metalake", "catalog"), "schema2", auditInfo); + ColumnEntity column3 = + createColumnEntity( + 
RandomIdGenerator.INSTANCE.nextId(), "column3", Types.StringType.get(), auditInfo); + ColumnEntity column4 = + createColumnEntity( + RandomIdGenerator.INSTANCE.nextId(), "column4", Types.StringType.get(), auditInfo); TableEntity table1InSchema2 = - createTableEntity( - 2L, Namespace.of("metalake", "catalog", "schema2"), "table1", auditInfo); + createTableEntityWithColumns( + 2L, + Namespace.of("metalake", "catalog", "schema2"), + "table1", + auditInfo, + Lists.newArrayList(column3, column4)); FilesetEntity fileset1InSchema2 = createFilesetEntity( 2L, Namespace.of("metalake", "catalog", "schema2"), "fileset1", auditInfo); @@ -705,18 +731,20 @@ void testEntityDelete(String type) throws IOException { store.put(schema2New); // table TableEntity table1New = - createTableEntity( + createTableEntityWithColumns( RandomIdGenerator.INSTANCE.nextId(), table1.namespace(), table1.name(), - table1.auditInfo()); + table1.auditInfo(), + table1.columns()); store.put(table1New); TableEntity table1InSchema2New = - createTableEntity( + createTableEntityWithColumns( RandomIdGenerator.INSTANCE.nextId(), table1InSchema2.namespace(), table1InSchema2.name(), - table1InSchema2.auditInfo()); + table1InSchema2.auditInfo(), + table1InSchema2.columns()); store.put(table1InSchema2New); // fileset FilesetEntity fileset1New = @@ -1228,13 +1256,32 @@ public static SchemaEntity createSchemaEntity( .build(); } + public static ColumnEntity createColumnEntity( + Long id, String name, Type dataType, AuditInfo auditInfo) { + return ColumnEntity.builder() + .withId(id) + .withName(name) + .withComment("") + .withDataType(dataType) + .withNullable(true) + .withAutoIncrement(false) + .withAuditInfo(auditInfo) + .build(); + } + public static TableEntity createTableEntity( Long id, Namespace namespace, String name, AuditInfo auditInfo) { + return createTableEntityWithColumns(id, namespace, name, auditInfo, Collections.emptyList()); + } + + public static TableEntity createTableEntityWithColumns( + Long id, Namespace namespace, String name, AuditInfo auditInfo, List columns) { return TableEntity.builder() .withId(id) .withName(name) .withNamespace(namespace) .withAuditInfo(auditInfo) + .withColumns(columns) .build(); } @@ -1325,6 +1372,7 @@ private void validateDeleteTableCascade(EntityStore store, TableEntity table1) Assertions.assertFalse(store.exists(table1.nameIdentifier(), Entity.EntityType.TABLE)); // Delete again should return false Assertions.assertFalse(store.delete(table1.nameIdentifier(), Entity.EntityType.TABLE, true)); + validateDeletedColumns(table1.id(), table1.type()); } private void validateDeleteFileset( @@ -1391,6 +1439,7 @@ private void validateDeleteMetalakeCascade( Assertions.assertFalse(store.exists(userNew.nameIdentifier(), Entity.EntityType.USER)); Assertions.assertFalse(store.exists(groupNew.nameIdentifier(), EntityType.GROUP)); Assertions.assertFalse(store.exists(roleNew.nameIdentifier(), EntityType.ROLE)); + validateDeletedColumns(metalake.id(), metalake.type()); // Delete again should return false Assertions.assertFalse( @@ -1407,6 +1456,7 @@ private void validateDeleteCatalogCascade( Assertions.assertThrowsExactly( NoSuchEntityException.class, () -> store.get(id, Entity.EntityType.CATALOG, CatalogEntity.class)); + validateDeletedColumns(catalog.id(), catalog.type()); Assertions.assertThrowsExactly( NoSuchEntityException.class, @@ -1423,11 +1473,12 @@ private void validateDeleteSchemaCascade( TopicEntity topic1) throws IOException { TableEntity table1New = - createTableEntity( + 
createTableEntityWithColumns( RandomIdGenerator.INSTANCE.nextId(), table1.namespace(), table1.name(), - table1.auditInfo()); + table1.auditInfo(), + table1.columns()); store.put(table1New); FilesetEntity fileset1New = createFilesetEntity( @@ -1464,6 +1515,9 @@ private void validateDeleteSchemaCascade( Assertions.assertTrue(e instanceof NoSuchEntityException); Assertions.assertTrue(e.getMessage().contains("schema1")); } + + validateDeletedColumns(schema1.id(), schema1.type()); + // Delete again should return false Assertions.assertFalse(store.delete(schema1.nameIdentifier(), Entity.EntityType.SCHEMA, true)); @@ -1476,7 +1530,7 @@ private void validateDeleteSchemaCascade( () -> store.get(topic1.nameIdentifier(), Entity.EntityType.TOPIC, TopicEntity.class)); } - private static void validateDeleteMetalake( + private void validateDeleteMetalake( EntityStore store, BaseMetalake metalake, CatalogEntity catalogCopy, @@ -1504,7 +1558,7 @@ private static void validateDeleteMetalake( Assertions.assertFalse(store.delete(metalake.nameIdentifier(), Entity.EntityType.METALAKE)); } - private static void validateDeleteCatalog( + private void validateDeleteCatalog( EntityStore store, CatalogEntity catalog, TableEntity table1, @@ -1521,6 +1575,7 @@ private static void validateDeleteCatalog( NonEmptyEntityException.class, () -> store.delete(catalog.nameIdentifier(), Entity.EntityType.CATALOG)); store.delete(table1.nameIdentifier(), Entity.EntityType.TABLE); + validateDeletedColumns(table1.id(), table1.type()); store.delete(fileset1.nameIdentifier(), Entity.EntityType.FILESET); store.delete(topic1.nameIdentifier(), Entity.EntityType.TOPIC); try { @@ -1530,6 +1585,7 @@ private static void validateDeleteCatalog( } store.delete(schema1.nameIdentifier(), Entity.EntityType.SCHEMA); store.delete(table1InSchema2.nameIdentifier(), Entity.EntityType.TABLE); + validateDeletedColumns(table1InSchema2.id(), table1InSchema2.type()); Assertions.assertFalse( store.exists(fileset1InSchema2.nameIdentifier(), Entity.EntityType.FILESET)); Assertions.assertFalse(store.exists(topic1InSchema2.nameIdentifier(), Entity.EntityType.TOPIC)); @@ -1541,7 +1597,7 @@ private static void validateDeleteCatalog( Assertions.assertFalse(store.delete(catalog.nameIdentifier(), Entity.EntityType.CATALOG)); } - private static void validateDeleteSchema( + private void validateDeleteSchema( EntityStore store, SchemaEntity schema1, TableEntity table1, @@ -1559,11 +1615,12 @@ // has not been deleted yet; Assertions.assertTrue(store.exists(schema1.nameIdentifier(), Entity.EntityType.SCHEMA)); Assertions.assertTrue(store.exists(table1.nameIdentifier(), Entity.EntityType.TABLE)); Assertions.assertTrue(store.exists(fileset1.nameIdentifier(), Entity.EntityType.FILESET)); Assertions.assertTrue(store.exists(topic1.nameIdentifier(), Entity.EntityType.TOPIC)); // Delete table1,fileset1 and schema1 Assertions.assertTrue(store.delete(table1.nameIdentifier(), Entity.EntityType.TABLE)); + validateDeletedColumns(table1.id(), table1.type()); Assertions.assertTrue(store.delete(fileset1.nameIdentifier(), Entity.EntityType.FILESET)); Assertions.assertTrue(store.delete(topic1.nameIdentifier(), Entity.EntityType.TOPIC)); Assertions.assertTrue(store.delete(schema1.nameIdentifier(), Entity.EntityType.SCHEMA)); @@ -1667,19 +1725,23 @@ private void validateDeleteTable( // delete again should return false Assertions.assertFalse(store.delete(table1InSchema2.nameIdentifier(), Entity.EntityType.TABLE)); + // Make sure all columns are
deleted + validateDeletedColumns(table1InSchema2.id(), table1InSchema2.type()); + // Make sure table 'metalake.catalog.schema1.table1' still exist; Assertions.assertEquals( table1, store.get(table1.nameIdentifier(), Entity.EntityType.TABLE, TableEntity.class)); // Make sure schema 'metalake.catalog.schema2' still exist; Assertions.assertEquals( schema2, store.get(schema2.nameIdentifier(), Entity.EntityType.SCHEMA, SchemaEntity.class)); - // Re-insert table1Inschema2 and everything is OK + // Re-insert table1InSchema2 and everything is OK TableEntity table1InSchema2New = - createTableEntity( + createTableEntityWithColumns( RandomIdGenerator.INSTANCE.nextId(), table1InSchema2.namespace(), table1InSchema2.name(), - table1InSchema2.auditInfo()); + table1InSchema2.auditInfo(), + table1InSchema2.columns()); store.put(table1InSchema2New); Assertions.assertTrue(store.exists(table1InSchema2.nameIdentifier(), Entity.EntityType.TABLE)); } @@ -2136,6 +2198,55 @@ private void validateDeletedTable(EntityStore store) throws IOException { (e) -> e)); } + private List<Pair<Long, Pair<Long, Long>>> listAllColumnWithEntityId( + Long entityId, Entity.EntityType entityType) { + String queryTemp = + "SELECT column_id, table_version, deleted_at FROM " + + "table_column_version_info WHERE %s = %d"; + String query; + switch (entityType) { + case TABLE: + query = String.format(queryTemp, "table_id", entityId); + break; + case SCHEMA: + query = String.format(queryTemp, "schema_id", entityId); + break; + case CATALOG: + query = String.format(queryTemp, "catalog_id", entityId); + break; + case METALAKE: + query = String.format(queryTemp, "metalake_id", entityId); + break; + default: + throw new IllegalArgumentException("Unsupported entity type: " + entityType); + } + + List<Pair<Long, Pair<Long, Long>>> results = Lists.newArrayList(); + try (SqlSession sqlSession = + SqlSessionFactoryHelper.getInstance().getSqlSessionFactory().openSession(true)) { + Connection connection = sqlSession.getConnection(); + Statement statement = connection.createStatement(); + + ResultSet rs = statement.executeQuery(query); + while (rs.next()) { + results.add( + Pair.of( + rs.getLong("column_id"), + Pair.of(rs.getLong("table_version"), rs.getLong("deleted_at")))); + } + } catch (SQLException e) { + throw new RuntimeException(e); + } + + return results; + } + + private void validateDeletedColumns(Long entityId, Entity.EntityType entityType) { + List<Pair<Long, Pair<Long, Long>>> deleteResult = + listAllColumnWithEntityId(entityId, entityType); + deleteResult.forEach(p -> Assertions.assertTrue(p.getRight().getRight() > 0)); + } + @ParameterizedTest @MethodSource("storageProvider") void testOptimizedDeleteForKv(String type) throws IOException { diff --git a/core/src/test/java/org/apache/gravitino/storage/relational/TestJDBCBackend.java b/core/src/test/java/org/apache/gravitino/storage/relational/TestJDBCBackend.java index c67c9697a12..bb7586cde95 100644 --- a/core/src/test/java/org/apache/gravitino/storage/relational/TestJDBCBackend.java +++ b/core/src/test/java/org/apache/gravitino/storage/relational/TestJDBCBackend.java @@ -150,7 +150,7 @@ public void init() { private static void prepareJdbcTable() { // Read the ddl sql to create table - String scriptPath = "h2/schema-0.6.0-h2.sql"; + String scriptPath = "h2/schema-0.7.0-h2.sql"; try (SqlSession sqlSession = SqlSessionFactoryHelper.getInstance().getSqlSessionFactory().openSession(true); Connection connection = sqlSession.getConnection(); diff --git a/core/src/test/java/org/apache/gravitino/storage/relational/service/TestTableColumnMetaService.java
b/core/src/test/java/org/apache/gravitino/storage/relational/service/TestTableColumnMetaService.java new file mode 100644 index 00000000000..0b15d1e0f5c --- /dev/null +++ b/core/src/test/java/org/apache/gravitino/storage/relational/service/TestTableColumnMetaService.java @@ -0,0 +1,457 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.storage.relational.service; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.Namespace; +import org.apache.gravitino.exceptions.NoSuchEntityException; +import org.apache.gravitino.meta.AuditInfo; +import org.apache.gravitino.meta.BaseMetalake; +import org.apache.gravitino.meta.CatalogEntity; +import org.apache.gravitino.meta.ColumnEntity; +import org.apache.gravitino.meta.SchemaEntity; +import org.apache.gravitino.meta.TableEntity; +import org.apache.gravitino.rel.expressions.literals.Literals; +import org.apache.gravitino.rel.types.Types; +import org.apache.gravitino.storage.RandomIdGenerator; +import org.apache.gravitino.storage.relational.TestJDBCBackend; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.testcontainers.shaded.com.google.common.collect.Lists; + +public class TestTableColumnMetaService extends TestJDBCBackend { + + private static final String METALAKE_NAME = "metalake_for_table_column_test"; + + private final AuditInfo auditInfo = + AuditInfo.builder().withCreator("creator").withCreateTime(Instant.now()).build(); + + @Test + public void testInsertAndGetTableColumns() throws IOException { + String catalogName = "catalog1"; + String schemaName = "schema1"; + createParentEntities(METALAKE_NAME, catalogName, schemaName); + + // Create a table entity without columns + TableEntity createdTable = + createTableEntity( + RandomIdGenerator.INSTANCE.nextId(), + Namespace.of(METALAKE_NAME, catalogName, schemaName), + "table1", + auditInfo); + TableMetaService.getInstance().insertTable(createdTable, false); + + TableEntity retrievedTable = + TableMetaService.getInstance().getTableByIdentifier(createdTable.nameIdentifier()); + Assertions.assertEquals(createdTable.id(), retrievedTable.id()); + Assertions.assertEquals(createdTable.name(), retrievedTable.name()); + Assertions.assertEquals(createdTable.namespace(), retrievedTable.namespace()); + Assertions.assertEquals(createdTable.auditInfo(), retrievedTable.auditInfo()); + Assertions.assertTrue(retrievedTable.columns().isEmpty()); + + // Create a table entity with columns + ColumnEntity column1 = + ColumnEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + 
.withName("column1") + .withComment("comment1") + .withDataType(Types.IntegerType.get()) + .withNullable(true) + .withAutoIncrement(false) + .withDefaultValue(Literals.integerLiteral(1)) + .withAuditInfo(auditInfo) + .build(); + ColumnEntity column2 = + ColumnEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("column2") + .withComment("comment2") + .withDataType(Types.StringType.get()) + .withNullable(false) + .withAutoIncrement(false) + .withDefaultValue(Literals.stringLiteral("1")) + .withAuditInfo(auditInfo) + .build(); + TableEntity createdTable2 = + TableEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("table2") + .withNamespace(Namespace.of(METALAKE_NAME, catalogName, schemaName)) + .withColumns(Lists.newArrayList(column1, column2)) + .withAuditInfo(auditInfo) + .build(); + TableMetaService.getInstance().insertTable(createdTable2, false); + + TableEntity retrievedTable2 = + TableMetaService.getInstance().getTableByIdentifier(createdTable2.nameIdentifier()); + Assertions.assertEquals(createdTable2.id(), retrievedTable2.id()); + Assertions.assertEquals(createdTable2.name(), retrievedTable2.name()); + Assertions.assertEquals(createdTable2.namespace(), retrievedTable2.namespace()); + Assertions.assertEquals(createdTable2.auditInfo(), retrievedTable2.auditInfo()); + Assertions.assertEquals(createdTable2.columns().size(), retrievedTable2.columns().size()); + compareTwoColumns(createdTable2.columns(), retrievedTable2.columns()); + + // test insert with overwrite + ColumnEntity column3 = + ColumnEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("column3") + .withComment("comment3") + .withDataType(Types.IntegerType.get()) + .withNullable(true) + .withAutoIncrement(false) + .withDefaultValue(Literals.integerLiteral(1)) + .withAuditInfo(auditInfo) + .build(); + + TableEntity createdTable3 = + TableEntity.builder() + .withId(createdTable2.id()) + .withName("table3") + .withNamespace(Namespace.of(METALAKE_NAME, catalogName, schemaName)) + .withColumns(Lists.newArrayList(column3)) + .withAuditInfo(auditInfo) + .build(); + + TableMetaService.getInstance().insertTable(createdTable3, true); + TableEntity retrievedTable3 = + TableMetaService.getInstance().getTableByIdentifier(createdTable3.nameIdentifier()); + Assertions.assertEquals(createdTable3.id(), retrievedTable3.id()); + Assertions.assertEquals(createdTable3.name(), retrievedTable3.name()); + Assertions.assertEquals(createdTable3.namespace(), retrievedTable3.namespace()); + Assertions.assertEquals(createdTable3.auditInfo(), retrievedTable3.auditInfo()); + Assertions.assertEquals(createdTable3.columns().size(), retrievedTable3.columns().size()); + compareTwoColumns(createdTable3.columns(), retrievedTable3.columns()); + } + + @Test + public void testUpdateTable() throws IOException { + String catalogName = "catalog1"; + String schemaName = "schema1"; + createParentEntities(METALAKE_NAME, catalogName, schemaName); + + // Create a table entity without columns + TableEntity createdTable = + createTableEntity( + RandomIdGenerator.INSTANCE.nextId(), + Namespace.of(METALAKE_NAME, catalogName, schemaName), + "table1", + auditInfo); + TableMetaService.getInstance().insertTable(createdTable, false); + + // Test update table with new name + TableEntity updatedTable = + TableEntity.builder() + .withId(createdTable.id()) + .withName("table2") + .withNamespace(createdTable.namespace()) + .withColumns(createdTable.columns()) + .withAuditInfo(auditInfo) + .build(); + Function updater 
= oldTable -> updatedTable; + + TableMetaService.getInstance().updateTable(createdTable.nameIdentifier(), updater); + TableEntity retrievedTable = + TableMetaService.getInstance().getTableByIdentifier(updatedTable.nameIdentifier()); + + Assertions.assertEquals(updatedTable.id(), retrievedTable.id()); + Assertions.assertEquals(updatedTable.name(), retrievedTable.name()); + Assertions.assertEquals(updatedTable.namespace(), retrievedTable.namespace()); + Assertions.assertEquals(updatedTable.auditInfo(), retrievedTable.auditInfo()); + Assertions.assertTrue(retrievedTable.columns().isEmpty()); + + // Test update table with adding one new column + ColumnEntity column1 = + ColumnEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("column1") + .withComment("comment1") + .withDataType(Types.IntegerType.get()) + .withNullable(true) + .withAutoIncrement(false) + .withDefaultValue(Literals.integerLiteral(1)) + .withAuditInfo(auditInfo) + .build(); + + TableEntity updatedTable2 = + TableEntity.builder() + .withId(updatedTable.id()) + .withName(updatedTable.name()) + .withNamespace(updatedTable.namespace()) + .withColumns(Lists.newArrayList(column1)) + .withAuditInfo(auditInfo) + .build(); + + Function updater2 = oldTable -> updatedTable2; + TableMetaService.getInstance().updateTable(updatedTable.nameIdentifier(), updater2); + + TableEntity retrievedTable2 = + TableMetaService.getInstance().getTableByIdentifier(updatedTable2.nameIdentifier()); + + Assertions.assertEquals(updatedTable2.id(), retrievedTable2.id()); + Assertions.assertEquals(updatedTable2.name(), retrievedTable2.name()); + Assertions.assertEquals(updatedTable2.namespace(), retrievedTable2.namespace()); + Assertions.assertEquals(updatedTable2.auditInfo(), retrievedTable2.auditInfo()); + Assertions.assertEquals(updatedTable2.columns().size(), retrievedTable2.columns().size()); + compareTwoColumns(updatedTable2.columns(), retrievedTable2.columns()); + + // Update the table with add one more column + ColumnEntity column2 = + ColumnEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("column2") + .withComment("comment2") + .withDataType(Types.StringType.get()) + .withNullable(false) + .withAutoIncrement(false) + .withDefaultValue(Literals.stringLiteral("1")) + .withAuditInfo(auditInfo) + .build(); + + TableEntity updatedTable3 = + TableEntity.builder() + .withId(updatedTable2.id()) + .withName(updatedTable2.name()) + .withNamespace(updatedTable2.namespace()) + .withColumns(Lists.newArrayList(column1, column2)) + .withAuditInfo(auditInfo) + .build(); + + Function updater3 = oldTable -> updatedTable3; + TableMetaService.getInstance().updateTable(updatedTable2.nameIdentifier(), updater3); + + TableEntity retrievedTable3 = + TableMetaService.getInstance().getTableByIdentifier(updatedTable3.nameIdentifier()); + + Assertions.assertEquals(updatedTable3.id(), retrievedTable3.id()); + Assertions.assertEquals(updatedTable3.name(), retrievedTable3.name()); + Assertions.assertEquals(updatedTable3.namespace(), retrievedTable3.namespace()); + Assertions.assertEquals(updatedTable3.auditInfo(), retrievedTable3.auditInfo()); + Assertions.assertEquals(updatedTable3.columns().size(), retrievedTable3.columns().size()); + compareTwoColumns(updatedTable3.columns(), retrievedTable3.columns()); + + // Update the table with updating one column + ColumnEntity updatedColumn1 = + ColumnEntity.builder() + .withId(column1.id()) + .withName(column1.name()) + .withComment("comment1_updated") + .withDataType(Types.LongType.get()) 
+ .withNullable(column1.nullable()) + .withAutoIncrement(column1.autoIncrement()) + .withDefaultValue(null) + .withAuditInfo(auditInfo) + .build(); + TableEntity updatedTable4 = + TableEntity.builder() + .withId(updatedTable3.id()) + .withName(updatedTable3.name()) + .withNamespace(updatedTable3.namespace()) + .withColumns(Lists.newArrayList(updatedColumn1, column2)) + .withAuditInfo(auditInfo) + .build(); + + Function updater4 = oldTable -> updatedTable4; + TableMetaService.getInstance().updateTable(updatedTable3.nameIdentifier(), updater4); + + TableEntity retrievedTable4 = + TableMetaService.getInstance().getTableByIdentifier(updatedTable4.nameIdentifier()); + + Assertions.assertEquals(updatedTable4.id(), retrievedTable4.id()); + Assertions.assertEquals(updatedTable4.name(), retrievedTable4.name()); + Assertions.assertEquals(updatedTable4.namespace(), retrievedTable4.namespace()); + Assertions.assertEquals(updatedTable4.auditInfo(), retrievedTable4.auditInfo()); + Assertions.assertEquals(updatedTable4.columns().size(), retrievedTable4.columns().size()); + compareTwoColumns(updatedTable4.columns(), retrievedTable4.columns()); + + // Update the table with removing one column + TableEntity updatedTable5 = + TableEntity.builder() + .withId(updatedTable4.id()) + .withName(updatedTable4.name()) + .withNamespace(updatedTable4.namespace()) + .withColumns(Lists.newArrayList(column2)) + .withAuditInfo(auditInfo) + .build(); + + Function updater5 = oldTable -> updatedTable5; + TableMetaService.getInstance().updateTable(updatedTable4.nameIdentifier(), updater5); + + TableEntity retrievedTable5 = + TableMetaService.getInstance().getTableByIdentifier(updatedTable5.nameIdentifier()); + compareTwoColumns(updatedTable5.columns(), retrievedTable5.columns()); + + // update the table with removing all columns + TableEntity updatedTable6 = + TableEntity.builder() + .withId(updatedTable5.id()) + .withName(updatedTable5.name()) + .withNamespace(updatedTable5.namespace()) + .withAuditInfo(auditInfo) + .build(); + + Function updater6 = oldTable -> updatedTable6; + TableMetaService.getInstance().updateTable(updatedTable5.nameIdentifier(), updater6); + + TableEntity retrievedTable6 = + TableMetaService.getInstance().getTableByIdentifier(updatedTable6.nameIdentifier()); + Assertions.assertTrue(retrievedTable6.columns().isEmpty()); + } + + @Test + public void testCreateAndDeleteTable() throws IOException { + String catalogName = "catalog1"; + String schemaName = "schema1"; + createParentEntities(METALAKE_NAME, catalogName, schemaName); + + // Create a table entity with column + ColumnEntity column = + ColumnEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("column1") + .withComment("comment1") + .withDataType(Types.IntegerType.get()) + .withNullable(true) + .withAutoIncrement(false) + .withDefaultValue(Literals.integerLiteral(1)) + .withAuditInfo(auditInfo) + .build(); + + TableEntity createdTable = + TableEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("table1") + .withNamespace(Namespace.of(METALAKE_NAME, catalogName, schemaName)) + .withColumns(Lists.newArrayList(column)) + .withAuditInfo(auditInfo) + .build(); + + TableMetaService.getInstance().insertTable(createdTable, false); + + TableEntity retrievedTable = + TableMetaService.getInstance().getTableByIdentifier(createdTable.nameIdentifier()); + Assertions.assertEquals(createdTable.id(), retrievedTable.id()); + Assertions.assertEquals(createdTable.name(), retrievedTable.name()); + 
Assertions.assertEquals(createdTable.namespace(), retrievedTable.namespace()); + Assertions.assertEquals(createdTable.auditInfo(), retrievedTable.auditInfo()); + compareTwoColumns(createdTable.columns(), retrievedTable.columns()); + + Assertions.assertTrue( + TableMetaService.getInstance().deleteTable(retrievedTable.nameIdentifier())); + + Assertions.assertThrows( + NoSuchEntityException.class, + () -> TableMetaService.getInstance().getTableByIdentifier(retrievedTable.nameIdentifier())); + } + + @Test + public void testDeleteMetalake() throws IOException { + String catalogName = "catalog1"; + String schemaName = "schema1"; + createParentEntities(METALAKE_NAME, catalogName, schemaName); + + // Create a table entity with column + ColumnEntity column = + ColumnEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("column1") + .withComment("comment1") + .withDataType(Types.IntegerType.get()) + .withNullable(true) + .withAutoIncrement(false) + .withDefaultValue(Literals.integerLiteral(1)) + .withAuditInfo(auditInfo) + .build(); + + TableEntity createdTable = + TableEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("table1") + .withNamespace(Namespace.of(METALAKE_NAME, catalogName, schemaName)) + .withColumns(Lists.newArrayList(column)) + .withAuditInfo(auditInfo) + .build(); + + TableMetaService.getInstance().insertTable(createdTable, false); + + TableEntity retrievedTable = + TableMetaService.getInstance().getTableByIdentifier(createdTable.nameIdentifier()); + Assertions.assertEquals(createdTable.id(), retrievedTable.id()); + Assertions.assertEquals(createdTable.name(), retrievedTable.name()); + Assertions.assertEquals(createdTable.namespace(), retrievedTable.namespace()); + Assertions.assertEquals(createdTable.auditInfo(), retrievedTable.auditInfo()); + compareTwoColumns(createdTable.columns(), retrievedTable.columns()); + + Assertions.assertTrue( + MetalakeMetaService.getInstance().deleteMetalake(NameIdentifier.of(METALAKE_NAME), true)); + + Assertions.assertThrows( + NoSuchEntityException.class, + () -> TableMetaService.getInstance().getTableByIdentifier(retrievedTable.nameIdentifier())); + } + + private void compareTwoColumns( + List expectedColumns, List actualColumns) { + Assertions.assertEquals(expectedColumns.size(), actualColumns.size()); + Map expectedColumnsMap = + expectedColumns.stream().collect(Collectors.toMap(ColumnEntity::name, Function.identity())); + actualColumns.forEach( + column -> { + ColumnEntity expectedColumn = expectedColumnsMap.get(column.name()); + Assertions.assertNotNull(expectedColumn); + Assertions.assertEquals(expectedColumn.id(), column.id()); + Assertions.assertEquals(expectedColumn.name(), column.name()); + Assertions.assertEquals(expectedColumn.comment(), column.comment()); + Assertions.assertEquals(expectedColumn.dataType(), column.dataType()); + Assertions.assertEquals(expectedColumn.nullable(), column.nullable()); + Assertions.assertEquals(expectedColumn.autoIncrement(), column.autoIncrement()); + Assertions.assertEquals(expectedColumn.defaultValue(), column.defaultValue()); + Assertions.assertEquals(expectedColumn.auditInfo(), column.auditInfo()); + }); + } + + private void createParentEntities(String metalakeName, String catalogName, String schemaName) + throws IOException { + BaseMetalake metalake = + createBaseMakeLake(RandomIdGenerator.INSTANCE.nextId(), metalakeName, auditInfo); + backend.insert(metalake, false); + + CatalogEntity catalog = + createCatalog( + RandomIdGenerator.INSTANCE.nextId(), + 
Namespace.of(metalakeName), + catalogName, + auditInfo); + backend.insert(catalog, false); + + SchemaEntity schema = + createSchemaEntity( + RandomIdGenerator.INSTANCE.nextId(), + Namespace.of(metalakeName, catalog.name()), + schemaName, + auditInfo); + backend.insert(schema, false); + } +} diff --git a/core/src/test/java/org/apache/gravitino/storage/relational/utils/TestPOConverters.java b/core/src/test/java/org/apache/gravitino/storage/relational/utils/TestPOConverters.java index 3f92aafdfde..e97e650a144 100644 --- a/core/src/test/java/org/apache/gravitino/storage/relational/utils/TestPOConverters.java +++ b/core/src/test/java/org/apache/gravitino/storage/relational/utils/TestPOConverters.java @@ -24,11 +24,13 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -36,18 +38,25 @@ import org.apache.gravitino.Entity; import org.apache.gravitino.MetadataObject; import org.apache.gravitino.Namespace; +import org.apache.gravitino.dto.util.DTOConverters; import org.apache.gravitino.file.Fileset; import org.apache.gravitino.json.JsonUtils; import org.apache.gravitino.meta.AuditInfo; import org.apache.gravitino.meta.BaseMetalake; import org.apache.gravitino.meta.CatalogEntity; +import org.apache.gravitino.meta.ColumnEntity; import org.apache.gravitino.meta.FilesetEntity; import org.apache.gravitino.meta.SchemaEntity; import org.apache.gravitino.meta.SchemaVersion; import org.apache.gravitino.meta.TableEntity; import org.apache.gravitino.meta.TagEntity; import org.apache.gravitino.meta.TopicEntity; +import org.apache.gravitino.rel.expressions.Expression; +import org.apache.gravitino.rel.expressions.literals.Literals; +import org.apache.gravitino.rel.types.Type; +import org.apache.gravitino.rel.types.Types; import org.apache.gravitino.storage.relational.po.CatalogPO; +import org.apache.gravitino.storage.relational.po.ColumnPO; import org.apache.gravitino.storage.relational.po.FilesetPO; import org.apache.gravitino.storage.relational.po.FilesetVersionPO; import org.apache.gravitino.storage.relational.po.MetalakePO; @@ -145,6 +154,122 @@ public void testFromTablePO() throws JsonProcessingException { assertEquals(expectedTable.auditInfo().creator(), convertedTable.auditInfo().creator()); } + @Test + public void testFromColumnPO() throws JsonProcessingException { + ColumnPO columnPO = + createColumnPO( + 1L, + "test", + 1L, + 1L, + 1L, + 1L, + Types.IntegerType.get(), + "test", + true, + true, + Literals.integerLiteral(1), + ColumnPO.ColumnOpType.CREATE); + + ColumnEntity expectedColumn = + createColumn( + 1L, "test", Types.IntegerType.get(), "test", true, true, Literals.integerLiteral(1)); + + ColumnEntity convertedColumn = POConverters.fromColumnPO(columnPO); + assertEquals(expectedColumn.id(), convertedColumn.id()); + assertEquals(expectedColumn.name(), convertedColumn.name()); + assertEquals(expectedColumn.dataType(), convertedColumn.dataType()); + assertEquals(expectedColumn.comment(), convertedColumn.comment()); + assertEquals(expectedColumn.nullable(), convertedColumn.nullable()); + assertEquals(expectedColumn.autoIncrement(), convertedColumn.autoIncrement()); + assertEquals(expectedColumn.defaultValue(), convertedColumn.defaultValue()); + + // Test column 
comment is null + ColumnPO columnPO1 = + createColumnPO( + 1L, + "test", + 1L, + 1L, + 1L, + 1L, + Types.IntegerType.get(), + null, + true, + true, + Literals.integerLiteral(1), + ColumnPO.ColumnOpType.CREATE); + + ColumnEntity expectedColumn1 = + createColumn( + 1L, "test", Types.IntegerType.get(), null, true, true, Literals.integerLiteral(1)); + + ColumnEntity convertedColumn1 = POConverters.fromColumnPO(columnPO1); + assertEquals(expectedColumn1.comment(), convertedColumn1.comment()); + } + + @Test + public void testFromTableColumnPOs() throws JsonProcessingException { + TablePO tablePO = createTablePO(1L, "test", 1L, 1L, 1L); + ColumnPO columnPO1 = + createColumnPO( + 1L, + "test1", + 1L, + 1L, + 1L, + 1L, + Types.IntegerType.get(), + "test1", + true, + true, + Literals.integerLiteral(1), + ColumnPO.ColumnOpType.CREATE); + + ColumnPO columnPO2 = + createColumnPO( + 2L, + "test2", + 1L, + 1L, + 1L, + 1L, + Types.StringType.get(), + "test2", + true, + true, + Literals.stringLiteral("1"), + ColumnPO.ColumnOpType.CREATE); + + ColumnEntity expectedColumn1 = + createColumn( + 1L, "test1", Types.IntegerType.get(), "test1", true, true, Literals.integerLiteral(1)); + + ColumnEntity expectedColumn2 = + createColumn( + 2L, "test2", Types.StringType.get(), "test2", true, true, Literals.stringLiteral("1")); + + TableEntity expectedTable = + createTableWithColumns( + 1L, + "test", + NamespaceUtil.ofTable("test_metalake", "test_catalog", "test_schema"), + Lists.newArrayList(expectedColumn1, expectedColumn2)); + + TableEntity convertedTable = + POConverters.fromTableAndColumnPOs( + tablePO, + Lists.newArrayList(columnPO1, columnPO2), + NamespaceUtil.ofTable("test_metalake", "test_catalog", "test_schema")); + + assertEquals(expectedTable.id(), convertedTable.id()); + assertEquals(expectedTable.name(), convertedTable.name()); + assertEquals(expectedTable.namespace(), convertedTable.namespace()); + assertEquals(expectedTable.auditInfo().creator(), convertedTable.auditInfo().creator()); + assertEquals(expectedTable.columns().size(), convertedTable.columns().size()); + assertEquals(expectedTable.columns(), convertedTable.columns()); + } + @Test public void testFromFilesetPO() throws JsonProcessingException { FilesetVersionPO filesetVersionPO = @@ -527,7 +652,7 @@ public void testUpdateTablePOVersion() { TablePO.Builder builder = TablePO.builder().withMetalakeId(1L).withCatalogId(1L).withSchemaId(1L); TablePO initPO = POConverters.initializeTablePOWithVersion(tableEntity, builder); - TablePO updatePO = POConverters.updateTablePOWithVersion(initPO, updatedTable); + TablePO updatePO = POConverters.updateTablePOWithVersion(initPO, updatedTable, false); assertEquals(1, initPO.getCurrentVersion()); assertEquals(1, initPO.getLastVersion()); assertEquals(0, initPO.getDeletedAt()); @@ -800,26 +925,18 @@ private static SchemaPO createSchemaPO( } private static TableEntity createTable(Long id, String name, Namespace namespace) { - AuditInfo auditInfo = - AuditInfo.builder().withCreator("creator").withCreateTime(FIX_INSTANT).build(); - return TableEntity.builder() - .withId(id) - .withName(name) - .withNamespace(namespace) - .withAuditInfo(auditInfo) - .build(); + return createTableWithColumns(id, name, namespace, Collections.emptyList()); } - private static TopicEntity createTopic( - Long id, String name, Namespace namespace, String comment, Map properties) { + private static TableEntity createTableWithColumns( + Long id, String name, Namespace namespace, List columns) { AuditInfo auditInfo = 
AuditInfo.builder().withCreator("creator").withCreateTime(FIX_INSTANT).build(); - return TopicEntity.builder() + return TableEntity.builder() .withId(id) .withName(name) .withNamespace(namespace) - .withComment(comment) - .withProperties(properties) + .withColumns(columns) .withAuditInfo(auditInfo) .build(); } @@ -842,6 +959,79 @@ private static TablePO createTablePO( .build(); } + private static ColumnPO createColumnPO( + Long id, + String columnName, + Long metalakeId, + Long catalogId, + Long schemaId, + Long tableId, + Type columnType, + String columnComment, + boolean columnNullable, + boolean columnAutoIncrement, + Expression columnDefaultValue, + ColumnPO.ColumnOpType columnOpType) + throws JsonProcessingException { + AuditInfo auditInfo = + AuditInfo.builder().withCreator("creator").withCreateTime(FIX_INSTANT).build(); + return ColumnPO.builder() + .withColumnId(id) + .withColumnName(columnName) + .withMetalakeId(metalakeId) + .withCatalogId(catalogId) + .withSchemaId(schemaId) + .withTableId(tableId) + .withTableVersion(1L) + .withColumnType(JsonUtils.anyFieldMapper().writeValueAsString(columnType)) + .withColumnComment(columnComment) + .withNullable(ColumnPO.Nullable.fromBoolean(columnNullable).value()) + .withAutoIncrement(ColumnPO.AutoIncrement.fromBoolean(columnAutoIncrement).value()) + .withDefaultValue( + JsonUtils.anyFieldMapper() + .writeValueAsString(DTOConverters.toFunctionArg(columnDefaultValue))) + .withColumnOpType(columnOpType.value()) + .withAuditInfo(JsonUtils.anyFieldMapper().writeValueAsString(auditInfo)) + .withDeletedAt(0L) + .build(); + } + + private static ColumnEntity createColumn( + Long id, + String columnName, + Type columnType, + String columnComment, + boolean columnNullable, + boolean columnAutoIncrement, + Expression columnDefaultValue) { + AuditInfo auditInfo = + AuditInfo.builder().withCreator("creator").withCreateTime(FIX_INSTANT).build(); + return ColumnEntity.builder() + .withId(id) + .withName(columnName) + .withDataType(columnType) + .withComment(columnComment) + .withNullable(columnNullable) + .withAutoIncrement(columnAutoIncrement) + .withDefaultValue(columnDefaultValue) + .withAuditInfo(auditInfo) + .build(); + } + + private static TopicEntity createTopic( + Long id, String name, Namespace namespace, String comment, Map properties) { + AuditInfo auditInfo = + AuditInfo.builder().withCreator("creator").withCreateTime(FIX_INSTANT).build(); + return TopicEntity.builder() + .withId(id) + .withName(name) + .withNamespace(namespace) + .withComment(comment) + .withProperties(properties) + .withAuditInfo(auditInfo) + .build(); + } + private static TopicPO createTopicPO( Long id, String name, diff --git a/core/src/test/resources/META-INF/services/org.apache.gravitino.credential.CredentialProvider b/core/src/test/resources/META-INF/services/org.apache.gravitino.credential.CredentialProvider new file mode 100644 index 00000000000..cbdbff0bee9 --- /dev/null +++ b/core/src/test/resources/META-INF/services/org.apache.gravitino.credential.CredentialProvider @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at # + # http://www.apache.org/licenses/LICENSE-2.0 # + # Unless required by applicable law or agreed to in writing, + # software distributed under the License is distributed on an + # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + # KIND, either express or implied. See the License for the + # specific language governing permissions and limitations + # under the License. + # +org.apache.gravitino.credential.DummyCredentialProvider \ No newline at end of file diff --git a/docs/hadoop-catalog.md b/docs/hadoop-catalog.md index d6706ff3e1b..d28e6d93b04 100644 --- a/docs/hadoop-catalog.md +++ b/docs/hadoop-catalog.md @@ -25,16 +25,19 @@ Hadoop 3. If there's any compatibility issue, please create an [issue](https://g Besides the [common catalog properties](./gravitino-server-config.md#gravitino-catalog-properties-configuration), the Hadoop catalog has the following properties: -| Property Name | Description | Default Value | Required | Since Version | -|----------------------------------------------------|------------------------------------------------------------------------------------------------|---------------|-------------------------------------------------------------|---------------| -| `location` | The storage location managed by Hadoop catalog. | (none) | No | 0.5.0 | -| `authentication.impersonation-enable` | Whether to enable impersonation for the Hadoop catalog. | `false` | No | 0.5.1 | -| `authentication.type` | The type of authentication for Hadoop catalog, currently we only support `kerberos`, `simple`. | `simple` | No | 0.5.1 | -| `authentication.kerberos.principal` | The principal of the Kerberos authentication | (none) | required if the value of `authentication.type` is Kerberos. | 0.5.1 | -| `authentication.kerberos.keytab-uri` | The URI of The keytab for the Kerberos authentication. | (none) | required if the value of `authentication.type` is Kerberos. | 0.5.1 | -| `authentication.kerberos.check-interval-sec` | The check interval of Kerberos credential for Hadoop catalog. | 60 | No | 0.5.1 | -| `authentication.kerberos.keytab-fetch-timeout-sec` | The fetch timeout of retrieving Kerberos keytab from `authentication.kerberos.keytab-uri`. | 60 | No | 0.5.1 | - +| Property Name | Description | Default Value | Required | Since Version | +|---------------|-------------|---------------|----------|---------------| +| `location` | The storage location managed by Hadoop catalog. | (none) | No | 0.5.0 | +| `filesystem-providers` | The names (split by comma) of the filesystem providers for the Hadoop catalog. Gravitino already supports the built-in `builtin-local` (local file) and `builtin-hdfs` (HDFS) providers. To support additional file systems, users can implement `FileSystemProvider` and register the implementation with Gravitino. | (none) | No | 0.7.0-incubating | +| `default-filesystem-provider` | The name of the default filesystem provider for this Hadoop catalog, used when users do not specify a scheme in the URI. Default value is `builtin-local`. | `builtin-local` | No | 0.7.0-incubating | +| `authentication.impersonation-enable` | Whether to enable impersonation for the Hadoop catalog. | `false` | No | 0.5.1 | +| `authentication.type` | The type of authentication for Hadoop catalog, currently we only support `kerberos` and `simple`. | `simple` | No | 0.5.1 | +| `authentication.kerberos.principal` | The principal of the Kerberos authentication. | (none) | required if the value of `authentication.type` is Kerberos. | 0.5.1 | +| `authentication.kerberos.keytab-uri` | The URI of the keytab for the Kerberos authentication. | (none) | required if the value of `authentication.type` is Kerberos. | 0.5.1 | +| `authentication.kerberos.check-interval-sec` | The check interval of Kerberos credential for Hadoop catalog. | 60 | No | 0.5.1 | +| `authentication.kerberos.keytab-fetch-timeout-sec` | The fetch timeout of retrieving Kerberos keytab from `authentication.kerberos.keytab-uri`. | 60 | No | 0.5.1 | + +For more about `filesystem-providers`, please refer to `HadoopFileSystemProvider` or `LocalFileSystemProvider` in the source code. Note that you also need to place the jar of the file system provider into the `$GRAVITINO_HOME/catalogs/hadoop/libs` directory if it's not already on the classpath.
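+As a minimal, hypothetical sketch of such a provider (the class `XyzFileSystemProvider` and the `xyz://` scheme are invented for illustration, and the `name()`/`scheme()`/`getFileSystem()` signatures should be verified against the `FileSystemProvider` interface in the source code):
+
+```java
+import java.io.IOException;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+// Routes URIs with a custom scheme, e.g. xyz://bucket/path, to a Hadoop FileSystem.
+public class XyzFileSystemProvider implements FileSystemProvider {
+
+  @Override
+  public String name() {
+    // The value users would list in `filesystem-providers`.
+    return "xyz";
+  }
+
+  @Override
+  public String scheme() {
+    return "xyz";
+  }
+
+  @Override
+  public FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException {
+    // Copy the catalog-level configuration into a Hadoop Configuration
+    Configuration conf = new Configuration();
+    config.forEach(conf::set);
+    return FileSystem.newInstance(path.toUri(), conf);
+  }
+}
+```
+
+Since providers are discovered via Java's `ServiceLoader`, the provider jar should also ship a `META-INF/services` file named after the fully qualified `FileSystemProvider` interface and listing the implementation class, mirroring the service registration files used elsewhere in Gravitino.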
### Authentication for Hadoop Catalog diff --git a/docs/lakehouse-hudi-catalog.md b/docs/lakehouse-hudi-catalog.md new file mode 100644 index 00000000000..be6d328bfb4 --- /dev/null +++ b/docs/lakehouse-hudi-catalog.md @@ -0,0 +1,110 @@ +--- +title: "Hudi catalog" +slug: /lakehouse-hudi-catalog +keywords: + - lakehouse + - hudi + - metadata +license: "This software is licensed under the Apache License version 2." +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Introduction + +Apache Gravitino provides the ability to manage Apache Hudi metadata. + +### Requirements and limitations + +:::info +Tested and verified with Apache Hudi `0.15.0`. +::: + +## Catalog + +### Catalog capabilities + +- Works as a catalog proxy, supporting `HMS` as the catalog backend. +- Only supports read operations (list and load) for Hudi schemas and tables. +- Doesn't support timeline management operations yet. + +### Catalog properties + +| Property name | Description | Default value | Required | Since Version | +|---------------|-------------|---------------|----------|---------------| +| `catalog-backend` | Catalog backend of the Gravitino Hudi catalog. Only supports `hms` now. | (none) | Yes | 0.7.0-incubating | +| `uri` | The URI associated with the backend, such as `thrift://127.0.0.1:9083` for the HMS backend. | (none) | Yes | 0.7.0-incubating | +| `client.pool-size` | For the HMS backend. The maximum number of Hive metastore clients in the pool for Gravitino. | 1 | No | 0.7.0-incubating | +| `client.pool-cache.eviction-interval-ms` | For the HMS backend. The cache pool eviction interval. | 300000 | No | 0.7.0-incubating | +| `gravitino.bypass.` | Property name with this prefix is passed down to the underlying backend client. For example, `gravitino.bypass.hive.metastore.failure.retries = 3` indicates 3 retries upon failure of Thrift metastore calls for the HMS backend. | (none) | No | 0.7.0-incubating |
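+
+For example, such a catalog can be registered through the Java client. This is a sketch only: `hudi_catalog`, the metalake name, and the thrift URI are placeholders, and the entry points are the ones shown in [Manage Relational Metadata Using Gravitino](./manage-relational-metadata-using-gravitino.md):
+
+```java
+GravitinoClient client =
+    GravitinoClient.builder("http://127.0.0.1:8090").withMetalake("metalake").build();
+
+// Backend properties from the table above (com.google.common.collect.ImmutableMap)
+Map<String, String> hudiProperties = ImmutableMap.of(
+    "catalog-backend", "hms",
+    "uri", "thrift://127.0.0.1:9083");
+
+// Register a read-only Hudi catalog backed by the HMS
+Catalog catalog = client.createCatalog(
+    "hudi_catalog", Catalog.Type.RELATIONAL, "lakehouse-hudi", "Hudi catalog", hudiProperties);
+```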
+ +### Catalog operations + +Please refer to [Manage Relational Metadata Using Gravitino](./manage-relational-metadata-using-gravitino.md#catalog-operations) for more details. + +## Schema + +### Schema capabilities + +- Only supports read operations: listSchema, loadSchema, and schemaExists. + +### Schema properties + +- The `Location` is an optional property that shows the storage path to the Hudi database. + +### Schema operations + +Only supports read operations: listSchema, loadSchema, and schemaExists. +Please refer to [Manage Relational Metadata Using Gravitino](./manage-relational-metadata-using-gravitino.md#schema-operations) for more details. + +## Table + +### Table capabilities + +- Only supports read operations: listTable, loadTable, and tableExists. + +### Table partitions + +- Supports loading Hudi partitioned tables (Hudi only supports identity partitioning). + +### Table sort orders + +- Doesn't support table sort orders. + +### Table distributions + +- Doesn't support table distributions. + +### Table indexes + +- Doesn't support table indexes. + +### Table properties + +- For the HMS backend, it exposes all the table parameters from the HMS. + +### Table column types + +The following table shows the mapping between Gravitino and [Apache Hudi column types](https://hudi.apache.org/docs/sql_ddl#supported-types): + +| Gravitino Type | Apache Hudi Type | +|----------------|------------------| +| `boolean` | `boolean` | +| `integer` | `int` | +| `long` | `long` | +| `date` | `date` | +| `timestamp` | `timestamp` | +| `float` | `float` | +| `double` | `double` | +| `string` | `string` | +| `decimal` | `decimal` | +| `binary` | `bytes` | +| `array` | `array` | +| `map` | `map` | +| `struct` | `struct` | + +### Table operations + +Only supports read operations: listTable, loadTable, and tableExists. +Please refer to [Manage Relational Metadata Using Gravitino](./manage-relational-metadata-using-gravitino.md#table-operations) for more details.
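+
+As a usage sketch (the schema and table names are placeholders, and `client` is the metalake-scoped Java client from the example above; check the linked document for the authoritative identifier layout), the supported read-only table operations look like:
+
+```java
+TableCatalog tableCatalog = client.loadCatalog("hudi_catalog").asTableCatalog();
+
+// list and load are the only table operations the Hudi catalog supports
+NameIdentifier[] tables = tableCatalog.listTables(Namespace.of("hudi_schema"));
+Table table = tableCatalog.loadTable(NameIdentifier.of("hudi_schema", "hudi_table"));
+```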
diff --git a/docs/lakehouse-paimon-catalog.md b/docs/lakehouse-paimon-catalog.md index b9c2772bc7a..4c336f3d323 100644 --- a/docs/lakehouse-paimon-catalog.md +++ b/docs/lakehouse-paimon-catalog.md @@ -22,17 +22,17 @@ Builds with Apache Paimon `0.8.0`. ### Catalog capabilities -- Works as a catalog proxy, supporting `FilesystemCatalog`. +- Works as a catalog proxy, supporting `FilesystemCatalog` and `JdbcCatalog`. - Supports DDL operations for Paimon schemas and tables. -- Doesn't support `JdbcCatalog` and `HiveCatalog` catalog backend now. +- Doesn't support `HiveCatalog` catalog backend now. - Doesn't support alterSchema. ### Catalog properties | Property name | Description | Default value | Required | Since Version | |---------------|-------------|---------------|----------|---------------| -| `catalog-backend` | Catalog backend of Gravitino Paimon catalog. Only supports `filesystem` now. | (none) | Yes | 0.6.0-incubating | +| `catalog-backend` | Catalog backend of Gravitino Paimon catalog. Supports `filesystem` and `jdbc` now. | (none) | Yes | 0.6.0-incubating | | `uri` | The URI configuration of the Paimon catalog. `thrift://127.0.0.1:9083` or `jdbc:postgresql://127.0.0.1:5432/db_name` or `jdbc:mysql://127.0.0.1:3306/metastore_db`. It is optional for `FilesystemCatalog`. | (none) | required if the value of `catalog-backend` is not `filesystem`. | 0.6.0-incubating | | `warehouse` | Warehouse directory of catalog. `file:///user/hive/warehouse-paimon/` for local fs, `hdfs://namespace/hdfs/path` for HDFS , `s3://{bucket-name}/path/` for S3 or `oss://{bucket-name}/path` for Aliyun OSS | (none) | Yes | 0.6.0-incubating | | `authentication.type` | The type of authentication for Paimon catalog backend, currently Gravitino only supports `Kerberos` and `simple`. | `simple` | No | 0.6.0-incubating | @@ -54,6 +54,19 @@ If you want to use the `oss` or `s3` warehouse, you need to place related jars i Any properties not defined by Gravitino with `gravitino.bypass.` prefix will pass to Paimon catalog properties and HDFS configuration. For example, if specify `gravitino.bypass.table.type`, `table.type` will pass to Paimon catalog properties. +#### JDBC backend + +If you are using the JDBC backend, you must specify properties such as `jdbc-user` and `jdbc-password`. + +| Property name | Description | Default value | Required | Since Version | +|-----------------|-------------|---------------|----------|---------------| +| `jdbc-user` | JDBC user of the Gravitino Paimon catalog for the `jdbc` backend. | (none) | required if the value of `catalog-backend` is `jdbc`. | 0.7.0-incubating | +| `jdbc-password` | JDBC password of the Gravitino Paimon catalog for the `jdbc` backend. | (none) | required if the value of `catalog-backend` is `jdbc`. | 0.7.0-incubating | + +:::caution +If you are using the JDBC backend, you must download the corresponding JDBC driver and place it in the `catalogs/lakehouse-paimon/libs` directory. +::: +
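+Putting it together, a property set for a JDBC-backed Paimon catalog might look like the following sketch (the URIs and credentials are placeholders, and `client` is the metalake-scoped Java client shown in [Manage Relational Metadata Using Gravitino](./manage-relational-metadata-using-gravitino.md)):
+
+```java
+Map<String, String> paimonJdbcProperties = ImmutableMap.of(
+    "catalog-backend", "jdbc",
+    "uri", "jdbc:mysql://127.0.0.1:3306/metastore_db",
+    "warehouse", "hdfs://namespace/hdfs/path",
+    "jdbc-user", "paimon",      // required for the jdbc backend
+    "jdbc-password", "paimon"); // required for the jdbc backend
+
+Catalog catalog = client.createCatalog(
+    "paimon_jdbc_catalog", Catalog.Type.RELATIONAL, "lakehouse-paimon",
+    "Paimon catalog", paimonJdbcProperties);
+```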
### Catalog operations Please refer to [Manage Relational Metadata Using Gravitino](./manage-relational-metadata-using-gravitino.md#catalog-operations) for more details. @@ -175,11 +188,16 @@ The Gravitino server doesn't allow passing the following reserved fields. | `comment` | The table comment. | | `owner` | The table owner. | | `bucket-key` | The table bucket-key. | +| `primary-key` | The table primary-key. | +| `partition` | The table partition. | + +The Gravitino server doesn't allow the following immutable fields to be modified, but allows them to be specified when creating a new table. + +| Configuration item | Description | +|--------------------|---------------------------| | `merge-engine` | The table merge-engine. | | `sequence.field` | The table sequence.field. | | `rowkind.field` | The table rowkind.field. | -| `primary-key` | The table primary-key. | -| `partition` | The table partition. | ### Table operations diff --git a/docs/manage-relational-metadata-using-gravitino.md b/docs/manage-relational-metadata-using-gravitino.md index fa2a11ac487..f810b4aa325 100644 --- a/docs/manage-relational-metadata-using-gravitino.md +++ b/docs/manage-relational-metadata-using-gravitino.md @@ -24,6 +24,7 @@ For more details, please refer to the related doc. - [**Apache Doris**](./jdbc-doris-catalog.md) - [**Apache Iceberg**](./lakehouse-iceberg-catalog.md) - [**Apache Paimon**](./lakehouse-paimon-catalog.md) +- [**Apache Hudi**](./lakehouse-hudi-catalog.md) Assuming: @@ -93,6 +94,7 @@ Currently, Gravitino supports the following catalog providers: | `hive` | [Hive catalog property](./apache-hive-catalog.md#catalog-properties) | | `lakehouse-iceberg` | [Iceberg catalog property](./lakehouse-iceberg-catalog.md#catalog-properties) | | `lakehouse-paimon` | [Paimon catalog property](./lakehouse-paimon-catalog.md#catalog-properties) | +| `lakehouse-hudi` | [Hudi catalog property](./lakehouse-hudi-catalog.md#catalog-properties) | | `jdbc-mysql` | [MySQL catalog property](./jdbc-mysql-catalog.md#catalog-properties) | | `jdbc-postgresql` | [PostgreSQL catalog property](./jdbc-postgresql-catalog.md#catalog-properties) | | `jdbc-doris` | [Doris catalog property](./jdbc-doris-catalog.md#catalog-properties) | @@ -326,6 +328,7 @@ Currently, Gravitino supports the following schema property: | `hive` | [Hive schema property](./apache-hive-catalog.md#schema-properties) | | `lakehouse-iceberg` | [Iceberg schema property](./lakehouse-iceberg-catalog.md#schema-properties) | | `lakehouse-paimon` | [Paimon schema property](./lakehouse-paimon-catalog.md#schema-properties) | +| `lakehouse-hudi` | [Hudi schema property](./lakehouse-hudi-catalog.md#schema-properties) | | `jdbc-mysql` | [MySQL schema property](./jdbc-mysql-catalog.md#schema-properties) | | `jdbc-postgresql` | [PostgreSQL schema property](./jdbc-postgresql-catalog.md#schema-properties) | | `jdbc-doris` | [Doris schema property](./jdbc-doris-catalog.md#schema-properties) | @@ -807,6 +810,7 @@ The following is a table of the column default value that Gravitino supports for | `hive` | ✘ | | `lakehouse-iceberg` | ✘ | | `lakehouse-paimon` | ✘ | +| `lakehouse-hudi` | ✘ | | `jdbc-mysql` | ✔ | | `jdbc-postgresql` | ✔ | @@ -820,6 +824,7 @@ The following table shows the column auto-increment that Gravitino supports for | `hive` | ✘ | | `lakehouse-iceberg` | ✘ | | `lakehouse-paimon` | ✘ | +| `lakehouse-hudi` | ✘ | | `jdbc-mysql` | ✔([limitations](./jdbc-mysql-catalog.md#table-column-auto-increment)) | | `jdbc-postgresql` | ✔ | @@ -832,6 +837,7 @@ The following is the table property that Gravitino supports: | `hive` | [Hive table property](./apache-hive-catalog.md#table-properties) | [Hive type mapping](./apache-hive-catalog.md#table-column-types) | | `lakehouse-iceberg` | [Iceberg table property](./lakehouse-iceberg-catalog.md#table-properties) | [Iceberg type mapping](./lakehouse-iceberg-catalog.md#table-column-types) | | `lakehouse-paimon` | [Paimon table property](./lakehouse-paimon-catalog.md#table-properties) | [Paimon type mapping](./lakehouse-paimon-catalog.md#table-column-types) | +| `lakehouse-hudi` | [Hudi table property](./lakehouse-hudi-catalog.md#table-properties) | [Hudi type mapping](./lakehouse-hudi-catalog.md#table-column-types) | | `jdbc-mysql` | [MySQL table property](./jdbc-mysql-catalog.md#table-properties) | [MySQL type mapping](./jdbc-mysql-catalog.md#table-column-types) | | `jdbc-postgresql` | [PostgreSQL table property](./jdbc-postgresql-catalog.md#table-properties) | [PostgreSQL type mapping](./jdbc-postgresql-catalog.md#table-column-types) | | `doris` | [Doris table property](./jdbc-doris-catalog.md#table-properties) | [Doris type mapping](./jdbc-doris-catalog.md#table-column-types) |
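+
+Where defaults are supported (see the support matrix above), the default value is attached to the column definition at table-creation time. A hedged sketch with the Java API (the catalog, schema, and table names are placeholders; `Column.of` and the `Types`/`Literals` helpers come from the `org.apache.gravitino.rel` packages):
+
+```java
+Column[] columns = new Column[] {
+    // auto-increment id with no default value
+    Column.of("id", Types.IntegerType.get(), "primary id", false, true,
+        Column.DEFAULT_VALUE_NOT_SET),
+    // literal default; only takes effect on backends marked ✔ above, e.g. jdbc-mysql
+    Column.of("status", Types.StringType.get(), "row status", true, false,
+        Literals.stringLiteral("active"))
+};
+
+Table table = client.loadCatalog("mysql_catalog").asTableCatalog()
+    .createTable(NameIdentifier.of("db", "example"), columns, "example table",
+        Collections.emptyMap());
+```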
fb8bde8cb00..afdf921bf47 100644 --- a/docs/security/access-control.md +++ b/docs/security/access-control.md @@ -90,6 +90,18 @@ The owner of each entity has implicit administrative class privilege, for exampl Only the Owner of a securable object can fully manage that resource. If a securable object needs to be managed by more than one person at the same time, the owner is assigned to a user group. +The metadata object types that support ownership are as follows: + +| Metadata Object Type | +|----------------------| +| Metalake | +| Catalog | +| Schema | +| Table | +| Topic | +| Fileset | +| Role | + ### User Users are generally granted one or multiple Roles, and users have different operating privileges depending on their Role. diff --git a/flink-connector/flink-runtime/build.gradle.kts b/flink-connector/flink-runtime/build.gradle.kts index 63349ba690a..1a71646444e 100644 --- a/flink-connector/flink-runtime/build.gradle.kts +++ b/flink-connector/flink-runtime/build.gradle.kts @@ -56,7 +56,6 @@ tasks.withType(ShadowJar::class.java) { relocate("com.google", "org.apache.gravitino.shaded.com.google") relocate("google", "org.apache.gravitino.shaded.google") relocate("org.apache.hc", "org.apache.gravitino.shaded.org.apache.hc") - relocate("org.apache.commons", "org.apache.gravitino.shaded.org.apache.commons") } publishing { diff --git a/flink-connector/flink/build.gradle.kts b/flink-connector/flink/build.gradle.kts index c4e75200ec6..9e2a48c036c 100644 --- a/flink-connector/flink/build.gradle.kts +++ b/flink-connector/flink/build.gradle.kts @@ -38,18 +38,10 @@ val scalaVersion: String = "2.12" val artifactName = "${rootProject.name}-flink-${flinkMajorVersion}_$scalaVersion" dependencies { - implementation(project(":api")) implementation(project(":catalogs:catalog-common")) - implementation(project(":common")) - - compileOnly(libs.bundles.log4j) - implementation(libs.commons.lang3) implementation(libs.guava) - implementation(libs.httpclient5) - implementation(libs.jackson.databind) - implementation(libs.jackson.annotations) - implementation(libs.jackson.datatype.jdk8) - implementation(libs.jackson.datatype.jsr310) + + compileOnly(project(":clients:client-java-runtime", configuration = "shadow")) compileOnly("org.apache.flink:flink-connector-hive_$scalaVersion:$flinkVersion") compileOnly("org.apache.flink:flink-table-common:$flinkVersion") @@ -76,13 +68,13 @@ dependencies { exclude("org.slf4j") } - // replace with client-java-runtime in flink connector runtime - compileOnly(project(":clients:client-java")) - testAnnotationProcessor(libs.lombok) - testCompileOnly(libs.lombok) + + testImplementation(project(":api")) testImplementation(project(":clients:client-java")) + testImplementation(project(":core")) + testImplementation(project(":common")) testImplementation(project(":integration-test-common", "testArtifacts")) testImplementation(project(":server")) testImplementation(project(":server-common")) @@ -146,7 +138,7 @@ dependencies { exclude("com.google.code.findbugs", "sr305") exclude("com.tdunning", "json") exclude("com.zaxxer", "HikariCP") - exclude("io.dropwizard.metricss") + exclude("io.dropwizard.metrics") exclude("javax.transaction", "transaction-api") exclude("org.apache.avro") exclude("org.apache.curator") diff --git a/flink-connector/flink/src/test/java/org/apache/gravitino/flink/connector/integration/test/FlinkEnvIT.java b/flink-connector/flink/src/test/java/org/apache/gravitino/flink/connector/integration/test/FlinkEnvIT.java index 4d61b3ad489..5ae8847c6c1 100644 ---
a/flink-connector/flink/src/test/java/org/apache/gravitino/flink/connector/integration/test/FlinkEnvIT.java +++ b/flink-connector/flink/src/test/java/org/apache/gravitino/flink/connector/integration/test/FlinkEnvIT.java @@ -36,14 +36,14 @@ import org.apache.gravitino.flink.connector.store.GravitinoCatalogStoreFactoryOptions; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.hadoop.fs.FileSystem; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public abstract class FlinkEnvIT extends AbstractIT { +public abstract class FlinkEnvIT extends BaseIT { private static final Logger LOG = LoggerFactory.getLogger(FlinkEnvIT.class); private static final ContainerSuite CONTAINER_SUITE = ContainerSuite.getInstance(); protected static final String GRAVITINO_METALAKE = "flink"; @@ -61,7 +61,7 @@ public abstract class FlinkEnvIT extends AbstractIT { private static String gravitinoUri = "http://127.0.0.1:8090"; @BeforeAll - static void startUp() { + void startUp() { // Start Gravitino server initGravitinoEnv(); initMetalake(); @@ -82,13 +82,13 @@ protected String flinkByPass(String key) { return PropertiesConverter.FLINK_PROPERTY_PREFIX + key; } - private static void initGravitinoEnv() { + private void initGravitinoEnv() { // Gravitino server is already started by AbstractIT, just construct gravitinoUrl int gravitinoPort = getGravitinoServerPort(); gravitinoUri = String.format("http://127.0.0.1:%d", gravitinoPort); } - private static void initMetalake() { + private void initMetalake() { metalake = client.createMetalake(GRAVITINO_METALAKE, "", Collections.emptyMap()); } diff --git a/flink-connector/flink/src/test/java/org/apache/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java b/flink-connector/flink/src/test/java/org/apache/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java index 0da3820d978..9cdac8be7db 100644 --- a/flink-connector/flink/src/test/java/org/apache/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java +++ b/flink-connector/flink/src/test/java/org/apache/gravitino/flink/connector/integration/test/hive/FlinkHiveCatalogIT.java @@ -64,10 +64,8 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class FlinkHiveCatalogIT extends FlinkCommonIT { private static final String DEFAULT_HIVE_CATALOG = "test_flink_hive_schema_catalog"; diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 2888f220876..3cfd42d5e53 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -32,6 +32,7 @@ airlift-resolver = "1.6" hive2 = "2.3.9" hadoop2 = "2.10.2" hadoop3 = "3.1.0" +hadoop3-gcs = "1.9.4-hadoop3" hadoop-minikdc = "3.3.6" htrace-core4 = "4.1.0-incubating" httpclient5 = "5.2.1" @@ -103,6 +104,7 @@ datanucleus-core = "4.1.17" datanucleus-api-jdo = "4.2.4" datanucleus-rdbms = "4.1.19" datanucleus-jdo = "3.2.0-m3" +hudi = "0.15.0" [libraries] protobuf-java = { group = "com.google.protobuf", name = "protobuf-java", version.ref = "protoc" } @@ -153,6 +155,7 @@ hadoop3-hdfs = { group = "org.apache.hadoop", name = 
"hadoop-hdfs", version.ref hadoop3-common = { group = "org.apache.hadoop", name = "hadoop-common", version.ref = "hadoop3"} hadoop3-client = { group = "org.apache.hadoop", name = "hadoop-client", version.ref = "hadoop3"} hadoop3-minicluster = { group = "org.apache.hadoop", name = "hadoop-minicluster", version.ref = "hadoop-minikdc"} +hadoop3-gcs = { group = "com.google.cloud.bigdataoss", name = "gcs-connector", version.ref = "hadoop3-gcs"} htrace-core4 = { group = "org.apache.htrace", name = "htrace-core4", version.ref = "htrace-core4" } airlift-json = { group = "io.airlift", name = "json", version.ref = "airlift-json"} airlift-resolver = { group = "io.airlift.resolver", name = "resolver", version.ref = "airlift-resolver"} diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/AbstractIT.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/BaseIT.java similarity index 90% rename from integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/AbstractIT.java rename to integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/BaseIT.java index 0ef754d5ef5..e8f688f96ea 100644 --- a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/AbstractIT.java +++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/BaseIT.java @@ -58,35 +58,42 @@ import org.apache.gravitino.server.web.JettyServerConfig; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.extension.ExtendWith; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testcontainers.shaded.org.awaitility.Awaitility; +/** + * BaseIT can be used as a base class for integration tests. It will automatically start a Gravitino + * server and stop it after all tests are finished. + * + *
<p>
Another use case is to start a MySQL or PostgreSQL docker instance and create a database for + * testing or just start the Gravitino server manually. + */ @ExtendWith({PrintFuncNameExtension.class, CloseContainerExtension.class}) -public class AbstractIT { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class BaseIT { protected static final ContainerSuite containerSuite = ContainerSuite.getInstance(); - private static final Logger LOG = LoggerFactory.getLogger(AbstractIT.class); + private static final Logger LOG = LoggerFactory.getLogger(BaseIT.class); private static final Splitter COMMA = Splitter.on(",").omitEmptyStrings().trimResults(); - protected static GravitinoAdminClient client; + protected GravitinoAdminClient client; private static final OAuthMockDataProvider mockDataProvider = OAuthMockDataProvider.getInstance(); protected static final CloseableGroup closer = CloseableGroup.create(); - private static MiniGravitino miniGravitino; + private MiniGravitino miniGravitino; - protected static Config serverConfig; + protected Config serverConfig; - public static String testMode = ""; + public String testMode = ""; - protected static Map customConfigs = new HashMap<>(); + protected Map customConfigs = new HashMap<>(); - protected static boolean ignoreIcebergRestService = true; + protected boolean ignoreIcebergRestService = true; public static final String DOWNLOAD_MYSQL_JDBC_DRIVER_URL = "https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.26/mysql-connector-java-8.0.26.jar"; @@ -94,25 +101,25 @@ public class AbstractIT { public static final String DOWNLOAD_POSTGRESQL_JDBC_DRIVER_URL = "https://jdbc.postgresql.org/download/postgresql-42.7.0.jar"; - private static TestDatabaseName META_DATA; - private static MySQLContainer MYSQL_CONTAINER; - private static PostgreSQLContainer POSTGRESQL_CONTAINER; + private TestDatabaseName META_DATA; + private MySQLContainer MYSQL_CONTAINER; + private PostgreSQLContainer POSTGRESQL_CONTAINER; - protected static String serverUri; + protected String serverUri; - protected static String originConfig; + protected String originConfig; - public static int getGravitinoServerPort() { + public int getGravitinoServerPort() { JettyServerConfig jettyServerConfig = JettyServerConfig.fromConfig(serverConfig, WEBSERVER_CONF_PREFIX); return jettyServerConfig.getHttpPort(); } - public static void registerCustomConfigs(Map configs) { + public void registerCustomConfigs(Map configs) { customConfigs.putAll(configs); } - private static void rewriteGravitinoServerConfig() throws IOException { + private void rewriteGravitinoServerConfig() throws IOException { String gravitinoHome = System.getenv("GRAVITINO_HOME"); Path configPath = Paths.get(gravitinoHome, "conf", GravitinoServer.CONF_FILE); if (originConfig == null) { @@ -129,7 +136,7 @@ private static void rewriteGravitinoServerConfig() throws IOException { ITUtils.rewriteConfigFile(tmpPath.toString(), configPath.toString(), customConfigs); } - private static void recoverGravitinoServerConfig() throws IOException { + private void recoverGravitinoServerConfig() throws IOException { String gravitinoHome = System.getenv("GRAVITINO_HOME"); Path configPath = Paths.get(gravitinoHome, "conf", GravitinoServer.CONF_FILE); @@ -139,7 +146,7 @@ private static void recoverGravitinoServerConfig() throws IOException { } } - protected static void downLoadJDBCDriver() throws IOException { + protected void downLoadJDBCDriver() throws IOException { String gravitinoHome = System.getenv("GRAVITINO_HOME"); if 
(!ITUtils.EMBEDDED_TEST_MODE.equals(testMode)) { String serverPath = ITUtils.joinPath(gravitinoHome, "libs"); @@ -157,7 +164,7 @@ protected static void downLoadJDBCDriver() throws IOException { } } - public static String startAndInitPGBackend() { + public String startAndInitPGBackend() { META_DATA = PG_JDBC_BACKEND; containerSuite.startPostgreSQLContainer(META_DATA); POSTGRESQL_CONTAINER = containerSuite.getPostgreSQLContainer(); @@ -208,7 +215,7 @@ public static String startAndInitPGBackend() { return pgUrlWithoutSchema; } - public static String startAndInitMySQLBackend() { + public String startAndInitMySQLBackend() { META_DATA = TestDatabaseName.MYSQL_JDBC_BACKEND; containerSuite.startMySQLContainer(META_DATA); MYSQL_CONTAINER = containerSuite.getMySQLContainer(); @@ -228,7 +235,7 @@ public static String startAndInitMySQLBackend() { new File( gravitinoHome + String.format( - "/scripts/mysql/schema-%s-mysql.sql", ConfigConstants.VERSION_0_6_0)), + "/scripts/mysql/schema-%s-mysql.sql", ConfigConstants.VERSION_0_7_0)), "UTF-8"); String[] initMySQLBackendSqls = @@ -248,15 +255,8 @@ public static String startAndInitMySQLBackend() { } } - @ParameterizedTest - @CsvSource({ - "embedded, jdbcBackend", - "embedded, kvBackend", - "deploy, jdbcBackend", - "deploy, kvBackend" - }) @BeforeAll - public static void startIntegrationTest() throws Exception { + public void startIntegrationTest() throws Exception { testMode = System.getProperty(ITUtils.TEST_MODE) == null ? ITUtils.EMBEDDED_TEST_MODE @@ -352,7 +352,7 @@ public static void startIntegrationTest() throws Exception { } @AfterAll - public static void stopIntegrationTest() throws IOException, InterruptedException { + public void stopIntegrationTest() throws IOException, InterruptedException { if (testMode != null && testMode.equals(ITUtils.EMBEDDED_TEST_MODE) && miniGravitino != null) { miniGravitino.stop(); } else { @@ -366,7 +366,7 @@ public static void stopIntegrationTest() throws IOException, InterruptedExceptio LOG.info("Tearing down Gravitino Server"); } - public static GravitinoAdminClient getGravitinoClient() { + public GravitinoAdminClient getGravitinoClient() { return client; } diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/CloseContainerExtension.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/CloseContainerExtension.java index 82d823f0e25..a37a9647009 100644 --- a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/CloseContainerExtension.java +++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/CloseContainerExtension.java @@ -27,7 +27,7 @@ /** * This is an extension for JUnit 5, which aims to perform certain operations (such as resource * recycling, etc.) after all test executions are completed (regardless of success or failure). You - * can Refer to {@link AbstractIT} for more information. + * can Refer to {@link BaseIT} for more information. 
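+ * <p>Illustrative sketch (the class name and config key below are hypothetical, not from this
+ * patch): because {@code BaseIT} now carries
+ * {@code @TestInstance(TestInstance.Lifecycle.PER_CLASS)}, JUnit 5 lets subclasses declare
+ * non-static lifecycle methods and call inherited instance helpers from them:
+ *
+ * <pre>{@code
+ * @Tag("gravitino-docker-test")
+ * public class ExampleCatalogIT extends BaseIT {
+ *   @BeforeAll
+ *   public void startIntegrationTest() throws Exception {
+ *     // A non-static @BeforeAll is only legal under the PER_CLASS lifecycle inherited from BaseIT.
+ *     registerCustomConfigs(ImmutableMap.of("some.server.config", "value")); // placeholder key
+ *     super.startIntegrationTest();
+ *   }
+ * }
+ * }</pre>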
*/ public class CloseContainerExtension implements BeforeAllCallback { @Override diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/ITUtils.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/ITUtils.java index e5454199f8a..9a6d7b13010 100644 --- a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/ITUtils.java +++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/ITUtils.java @@ -50,6 +50,7 @@ public class ITUtils { public static final String TEST_MODE = "testMode"; public static final String EMBEDDED_TEST_MODE = "embedded"; + public static final String DEPLOY_TEST_MODE = "deploy"; public static String joinPath(String... dirs) { return String.join(File.separator, dirs); diff --git a/integration-test-common/src/test/resources/log4j2.properties b/integration-test-common/src/test/resources/log4j2.properties new file mode 100644 index 00000000000..b22faa0a547 --- /dev/null +++ b/integration-test-common/src/test/resources/log4j2.properties @@ -0,0 +1,73 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# Set to debug or trace if log4j initialization is failing +status = info + +# Name of the configuration +name = ConsoleLogConfig + +# Console appender configuration +appender.console.type = Console +appender.console.name = consoleLogger +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p [%t] %c{1}:%L - %m%n + +# Log files location +property.logPath = ${sys:gravitino.log.path:-build/integration-test-common.log} + +# File appender configuration +appender.file.type = File +appender.file.name = fileLogger +appender.file.fileName = ${logPath} +appender.file.layout.type = PatternLayout +appender.file.layout.pattern = %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c - %m%n + +# Root logger level +rootLogger.level = info + +# Root logger referring to console and file appenders +rootLogger.appenderRef.stdout.ref = consoleLogger +rootLogger.appenderRef.file.ref = fileLogger + +# File appender configuration for testcontainers +appender.testcontainersFile.type = File +appender.testcontainersFile.name = testcontainersLogger +appender.testcontainersFile.fileName = build/testcontainers.log +appender.testcontainersFile.layout.type = PatternLayout +appender.testcontainersFile.layout.pattern = %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c - %m%n + +# Logger for testcontainers +logger.testcontainers.name = org.testcontainers +logger.testcontainers.level = debug +logger.testcontainers.additivity = false +logger.testcontainers.appenderRef.file.ref = testcontainersLogger + +logger.tc.name = tc +logger.tc.level = debug +logger.tc.additivity = false +logger.tc.appenderRef.file.ref = testcontainersLogger + +logger.docker.name = com.github.dockerjava +logger.docker.level = warn +logger.docker.additivity = false +logger.docker.appenderRef.file.ref = testcontainersLogger + +logger.http.name = com.github.dockerjava.zerodep.shaded.org.apache.hc.client5.http.wire +logger.http.level = off \ No newline at end of file diff --git a/scripts/h2/schema-0.7.0-h2.sql b/scripts/h2/schema-0.7.0-h2.sql new file mode 100644 index 00000000000..bada37abc32 --- /dev/null +++ b/scripts/h2/schema-0.7.0-h2.sql @@ -0,0 +1,290 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file-- +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"). You may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+-- + +CREATE TABLE IF NOT EXISTS `metalake_meta` ( + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `metalake_name` VARCHAR(128) NOT NULL COMMENT 'metalake name', + `metalake_comment` VARCHAR(256) DEFAULT '' COMMENT 'metalake comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'metalake properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'metalake audit info', + `schema_version` MEDIUMTEXT NOT NULL COMMENT 'metalake schema version info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'metalake current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'metalake last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'metalake deleted at', + PRIMARY KEY (metalake_id), + CONSTRAINT uk_mn_del UNIQUE (metalake_name, deleted_at) +) ENGINE = InnoDB; + + +CREATE TABLE IF NOT EXISTS `catalog_meta` ( + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `catalog_name` VARCHAR(128) NOT NULL COMMENT 'catalog name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `type` VARCHAR(64) NOT NULL COMMENT 'catalog type', + `provider` VARCHAR(64) NOT NULL COMMENT 'catalog provider', + `catalog_comment` VARCHAR(256) DEFAULT '' COMMENT 'catalog comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'catalog properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'catalog audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'catalog current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'catalog last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'catalog deleted at', + PRIMARY KEY (catalog_id), + CONSTRAINT uk_mid_cn_del UNIQUE (metalake_id, catalog_name, deleted_at) +) ENGINE=InnoDB; + + +CREATE TABLE IF NOT EXISTS `schema_meta` ( + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `schema_name` VARCHAR(128) NOT NULL COMMENT 'schema name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_comment` VARCHAR(256) DEFAULT '' COMMENT 'schema comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'schema properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'schema audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'schema current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'schema last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'schema deleted at', + PRIMARY KEY (schema_id), + CONSTRAINT uk_cid_sn_del UNIQUE (catalog_id, schema_name, deleted_at), + -- Aliases are used here, and indexes with the same name in H2 can only be created once. 
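+  -- A note on the soft-delete convention (illustrative, with a hypothetical query): `deleted_at`
+  -- participates in unique keys such as uk_cid_sn_del above, so dropped rows are kept with a
+  -- non-zero `deleted_at` while the same name stays re-creatable. Live rows are read with a
+  -- filter of the form:
+  --   SELECT schema_id FROM schema_meta WHERE catalog_id = ? AND schema_name = ? AND deleted_at = 0;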
+ KEY idx_smid (metalake_id) +) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `table_meta` ( + `table_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'table id', + `table_name` VARCHAR(128) NOT NULL COMMENT 'table name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'table audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'table current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'table last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'table deleted at', + PRIMARY KEY (table_id), + CONSTRAINT uk_sid_tn_del UNIQUE (schema_id, table_name, deleted_at), + -- Aliases are used here, and indexes with the same name in H2 can only be created once. + KEY idx_tmid (metalake_id), + KEY idx_tcid (catalog_id) +) ENGINE=InnoDB; + + +CREATE TABLE IF NOT EXISTS `table_column_version_info` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `table_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'table id', + `table_version` INT UNSIGNED NOT NULL COMMENT 'table version', + `column_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'column id', + `column_name` VARCHAR(128) NOT NULL COMMENT 'column name', + `column_type` TEXT NOT NULL COMMENT 'column type', + `column_comment` VARCHAR(256) DEFAULT '' COMMENT 'column comment', + `column_nullable` TINYINT(1) NOT NULL DEFAULT 1 COMMENT 'column nullable, 0 is not nullable, 1 is nullable', + `column_auto_increment` TINYINT(1) NOT NULL DEFAULT 0 COMMENT 'column auto increment, 0 is not auto increment, 1 is auto increment', + `column_default_value` TEXT DEFAULT NULL COMMENT 'column default value', + `column_op_type` TINYINT(1) NOT NULL COMMENT 'column operation type, 1 is create, 2 is update, 3 is delete', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'column deleted at', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'column audit info', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_tid_ver_cid_del` (`table_id`, `table_version`, `column_id`, `deleted_at`), + KEY `idx_tcmid` (`metalake_id`), + KEY `idx_tccid` (`catalog_id`), + KEY `idx_tcsid` (`schema_id`) +) ENGINE=InnoDB; + + +CREATE TABLE IF NOT EXISTS `fileset_meta` ( + `fileset_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'fileset id', + `fileset_name` VARCHAR(128) NOT NULL COMMENT 'fileset name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `type` VARCHAR(64) NOT NULL COMMENT 'fileset type', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'fileset audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'fileset current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'fileset last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'fileset deleted at', + PRIMARY KEY (fileset_id), + CONSTRAINT uk_sid_fn_del UNIQUE (schema_id, fileset_name, deleted_at), + -- Aliases are used here, and indexes with the same name in H2 can only be created once. 
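+  -- Illustrative note: each fileset pairs this pointer row with immutable rows in
+  -- `fileset_version_info` (defined below); the current definition is presumably resolved with a
+  -- join in the spirit of:
+  --   SELECT v.properties, v.storage_location
+  --   FROM fileset_meta m JOIN fileset_version_info v
+  --     ON v.fileset_id = m.fileset_id AND v.version = m.current_version
+  --   WHERE m.deleted_at = 0 AND v.deleted_at = 0;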
+ KEY idx_fmid (metalake_id), + KEY idx_fcid (catalog_id) +) ENGINE=InnoDB; + + +CREATE TABLE IF NOT EXISTS `fileset_version_info` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `fileset_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'fileset id', + `version` INT UNSIGNED NOT NULL COMMENT 'fileset info version', + `fileset_comment` VARCHAR(256) DEFAULT '' COMMENT 'fileset comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'fileset properties', + `storage_location` MEDIUMTEXT DEFAULT NULL COMMENT 'fileset storage location', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'fileset deleted at', + PRIMARY KEY (id), + CONSTRAINT uk_fid_ver_del UNIQUE (fileset_id, version, deleted_at), + -- Aliases are used here, and indexes with the same name in H2 can only be created once. + KEY idx_fvmid (metalake_id), + KEY idx_fvcid (catalog_id) +) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `topic_meta` ( + `topic_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'topic id', + `topic_name` VARCHAR(128) NOT NULL COMMENT 'topic name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `comment` VARCHAR(256) DEFAULT '' COMMENT 'topic comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'topic properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'topic audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'topic current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'topic last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'topic deleted at', + PRIMARY KEY (topic_id), + CONSTRAINT uk_cid_tn_del UNIQUE (schema_id, topic_name, deleted_at), + -- Aliases are used here, and indexes with the same name in H2 can only be created once. 
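+  -- Illustrative note: H2 scopes index names to the schema rather than the table, hence the
+  -- per-table aliases in this file (idx_smid, idx_tmid, idx_fmid, idx_tvmid, ...); the MySQL
+  -- script later in this patch can reuse a plain idx_mid in every table because MySQL scopes
+  -- index names per table.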
+ KEY idx_tvmid (metalake_id), + KEY idx_tvcid (catalog_id) +) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `user_meta` ( + `user_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'user id', + `user_name` VARCHAR(128) NOT NULL COMMENT 'username', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'user audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'user current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'user last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'user deleted at', + PRIMARY KEY (`user_id`), + CONSTRAINT `uk_mid_us_del` UNIQUE (`metalake_id`, `user_name`, `deleted_at`) +) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `role_meta` ( + `role_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'role id', + `role_name` VARCHAR(128) NOT NULL COMMENT 'role name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'schema properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'role audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'role current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'role last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'role deleted at', + PRIMARY KEY (`role_id`), + CONSTRAINT `uk_mid_rn_del` UNIQUE (`metalake_id`, `role_name`, `deleted_at`) +) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `role_meta_securable_object` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `role_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'role id', + `metadata_object_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'securable object entity id', + `type` VARCHAR(128) NOT NULL COMMENT 'securable object type', + `privilege_names` VARCHAR(256) NOT NULL COMMENT 'securable object privilege names', + `privilege_conditions` VARCHAR(256) NOT NULL COMMENT 'securable object privilege conditions', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'securable objectcurrent version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'securable object last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'securable object deleted at', + PRIMARY KEY (`id`), + KEY `idx_obj_rid` (`role_id`), + KEY `idx_obj_eid` (`metadata_object_id`) + ) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `user_role_rel` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `user_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'user id', + `role_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'role id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'relation audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'relation current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'relation last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'relation deleted at', + PRIMARY KEY (`id`), + CONSTRAINT `uk_ui_ri_del` UNIQUE (`user_id`, `role_id`, `deleted_at`), + KEY `idx_rid` (`role_id`) +) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `group_meta` ( + `group_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'group id', + `group_name` VARCHAR(128) NOT NULL COMMENT 'group name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'group audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'group current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'group last 
version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'group deleted at', + PRIMARY KEY (`group_id`), + CONSTRAINT `uk_mid_gr_del` UNIQUE (`metalake_id`, `group_name`, `deleted_at`) + ) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `group_role_rel` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `group_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'group id', + `role_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'role id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'relation audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'relation current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'relation last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'relation deleted at', + PRIMARY KEY (`id`), + CONSTRAINT `uk_gi_ri_del` UNIQUE (`group_id`, `role_id`, `deleted_at`), + KEY `idx_gid` (`group_id`) + ) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `tag_meta` ( + `tag_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'tag id', + `tag_name` VARCHAR(128) NOT NULL COMMENT 'tag name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `tag_comment` VARCHAR(256) DEFAULT '' COMMENT 'tag comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'tag properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'tag audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'tag current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'tag last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'tag deleted at', + PRIMARY KEY (`tag_id`), + UNIQUE KEY `uk_mn_tn_del` (`metalake_id`, `tag_name`, `deleted_at`) + ) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `tag_relation_meta` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `tag_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'tag id', + `metadata_object_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metadata object id', + `metadata_object_type` VARCHAR(64) NOT NULL COMMENT 'metadata object type', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'tag relation audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'tag relation current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'tag relation last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'tag relation deleted at', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_ti_mi_del` (`tag_id`, `metadata_object_id`, `deleted_at`), + KEY `idx_tid` (`tag_id`), + KEY `idx_mid` (`metadata_object_id`) + ) ENGINE=InnoDB; + +CREATE TABLE IF NOT EXISTS `owner_meta` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `owner_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'owner id', + `owner_type` VARCHAR(64) NOT NULL COMMENT 'owner type', + `metadata_object_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metadata object id', + `metadata_object_type` VARCHAR(64) NOT NULL COMMENT 'metadata object type', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'owner relation audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'owner relation current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'owner relation last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'owner relation deleted at', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_ow_me_del` (`owner_id`, `metadata_object_id`, `metadata_object_type`, `deleted_at`), + KEY `idx_oid` (`owner_id`), + KEY 
`idx_meid` (`metadata_object_id`) + ) ENGINE=InnoDB; diff --git a/scripts/h2/upgrade-0.6.0-to-0.7.0-h2.sql b/scripts/h2/upgrade-0.6.0-to-0.7.0-h2.sql new file mode 100644 index 00000000000..cdf1bbdc432 --- /dev/null +++ b/scripts/h2/upgrade-0.6.0-to-0.7.0-h2.sql @@ -0,0 +1,41 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file-- +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"). You may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. +-- +CREATE TABLE IF NOT EXISTS `table_column_version_info` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `table_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'table id', + `table_version` INT UNSIGNED NOT NULL COMMENT 'table version', + `column_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'column id', + `column_name` VARCHAR(128) NOT NULL COMMENT 'column name', + `column_type` TEXT NOT NULL COMMENT 'column type', + `column_comment` VARCHAR(256) DEFAULT '' COMMENT 'column comment', + `column_nullable` TINYINT(1) NOT NULL DEFAULT 1 COMMENT 'column nullable, 0 is not nullable, 1 is nullable', + `column_auto_increment` TINYINT(1) NOT NULL DEFAULT 0 COMMENT 'column auto increment, 0 is not auto increment, 1 is auto increment', + `column_default_value` TEXT DEFAULT NULL COMMENT 'column default value', + `column_op_type` TINYINT(1) NOT NULL COMMENT 'column operation type, 1 is create, 2 is update, 3 is delete', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'column deleted at', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'column audit info', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_tid_ver_cid_del` (`table_id`, `table_version`, `column_id`, `deleted_at`), + KEY `idx_tcmid` (`metalake_id`), + KEY `idx_tccid` (`catalog_id`), + KEY `idx_tcsid` (`schema_id`) +) ENGINE=InnoDB; diff --git a/scripts/mysql/schema-0.7.0-mysql.sql b/scripts/mysql/schema-0.7.0-mysql.sql new file mode 100644 index 00000000000..13f46debc0d --- /dev/null +++ b/scripts/mysql/schema-0.7.0-mysql.sql @@ -0,0 +1,281 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file-- +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"). You may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. 
See the License for the +-- specific language governing permissions and limitations +-- under the License. +-- + +CREATE TABLE IF NOT EXISTS `metalake_meta` ( + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `metalake_name` VARCHAR(128) NOT NULL COMMENT 'metalake name', + `metalake_comment` VARCHAR(256) DEFAULT '' COMMENT 'metalake comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'metalake properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'metalake audit info', + `schema_version` MEDIUMTEXT NOT NULL COMMENT 'metalake schema version info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'metalake current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'metalake last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'metalake deleted at', + PRIMARY KEY (`metalake_id`), + UNIQUE KEY `uk_mn_del` (`metalake_name`, `deleted_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'metalake metadata'; + +CREATE TABLE IF NOT EXISTS `catalog_meta` ( + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `catalog_name` VARCHAR(128) NOT NULL COMMENT 'catalog name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `type` VARCHAR(64) NOT NULL COMMENT 'catalog type', + `provider` VARCHAR(64) NOT NULL COMMENT 'catalog provider', + `catalog_comment` VARCHAR(256) DEFAULT '' COMMENT 'catalog comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'catalog properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'catalog audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'catalog current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'catalog last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'catalog deleted at', + PRIMARY KEY (`catalog_id`), + UNIQUE KEY `uk_mid_cn_del` (`metalake_id`, `catalog_name`, `deleted_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'catalog metadata'; + +CREATE TABLE IF NOT EXISTS `schema_meta` ( + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `schema_name` VARCHAR(128) NOT NULL COMMENT 'schema name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_comment` VARCHAR(256) DEFAULT '' COMMENT 'schema comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'schema properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'schema audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'schema current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'schema last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'schema deleted at', + PRIMARY KEY (`schema_id`), + UNIQUE KEY `uk_cid_sn_del` (`catalog_id`, `schema_name`, `deleted_at`), + KEY `idx_mid` (`metalake_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'schema metadata'; + +CREATE TABLE IF NOT EXISTS `table_meta` ( + `table_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'table id', + `table_name` VARCHAR(128) NOT NULL COMMENT 'table name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'table audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'table current version', + `last_version` INT UNSIGNED NOT NULL 
DEFAULT 1 COMMENT 'table last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'table deleted at', + PRIMARY KEY (`table_id`), + UNIQUE KEY `uk_sid_tn_del` (`schema_id`, `table_name`, `deleted_at`), + KEY `idx_mid` (`metalake_id`), + KEY `idx_cid` (`catalog_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'table metadata'; + +CREATE TABLE IF NOT EXISTS `table_column_version_info` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `table_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'table id', + `table_version` INT UNSIGNED NOT NULL COMMENT 'table version', + `column_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'column id', + `column_name` VARCHAR(128) NOT NULL COMMENT 'column name', + `column_type` TEXT NOT NULL COMMENT 'column type', + `column_comment` VARCHAR(256) DEFAULT '' COMMENT 'column comment', + `column_nullable` TINYINT(1) NOT NULL DEFAULT 1 COMMENT 'column nullable, 0 is not nullable, 1 is nullable', + `column_auto_increment` TINYINT(1) NOT NULL DEFAULT 0 COMMENT 'column auto increment, 0 is not auto increment, 1 is auto increment', + `column_default_value` TEXT DEFAULT NULL COMMENT 'column default value', + `column_op_type` TINYINT(1) NOT NULL COMMENT 'column operation type, 1 is create, 2 is update, 3 is delete', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'column deleted at', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'column audit info', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_tid_ver_cid_del` (`table_id`, `table_version`, `column_id`, `deleted_at`), + KEY `idx_mid` (`metalake_id`), + KEY `idx_cid` (`catalog_id`), + KEY `idx_sid` (`schema_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'table column version info'; + +CREATE TABLE IF NOT EXISTS `fileset_meta` ( + `fileset_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'fileset id', + `fileset_name` VARCHAR(128) NOT NULL COMMENT 'fileset name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `type` VARCHAR(64) NOT NULL COMMENT 'fileset type', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'fileset audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'fileset current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'fileset last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'fileset deleted at', + PRIMARY KEY (`fileset_id`), + UNIQUE KEY `uk_sid_fn_del` (`schema_id`, `fileset_name`, `deleted_at`), + KEY `idx_mid` (`metalake_id`), + KEY `idx_cid` (`catalog_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'fileset metadata'; + +CREATE TABLE IF NOT EXISTS `fileset_version_info` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `fileset_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'fileset id', + `version` INT UNSIGNED NOT NULL COMMENT 'fileset info version', + `fileset_comment` VARCHAR(256) DEFAULT '' COMMENT 'fileset comment', + `properties` MEDIUMTEXT DEFAULT NULL 
COMMENT 'fileset properties', + `storage_location` MEDIUMTEXT NOT NULL COMMENT 'fileset storage location', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'fileset deleted at', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_fid_ver_del` (`fileset_id`, `version`, `deleted_at`), + KEY `idx_mid` (`metalake_id`), + KEY `idx_cid` (`catalog_id`), + KEY `idx_sid` (`schema_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'fileset version info'; + +CREATE TABLE IF NOT EXISTS `topic_meta` ( + `topic_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'topic id', + `topic_name` VARCHAR(128) NOT NULL COMMENT 'topic name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `comment` VARCHAR(256) DEFAULT '' COMMENT 'topic comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'topic properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'topic audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'topic current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'topic last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'topic deleted at', + PRIMARY KEY (`topic_id`), + UNIQUE KEY `uk_sid_tn_del` (`schema_id`, `topic_name`, `deleted_at`), + KEY `idx_mid` (`metalake_id`), + KEY `idx_cid` (`catalog_id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'topic metadata'; + +CREATE TABLE IF NOT EXISTS `user_meta` ( + `user_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'user id', + `user_name` VARCHAR(128) NOT NULL COMMENT 'username', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'user audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'user current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'user last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'user deleted at', + PRIMARY KEY (`user_id`), + UNIQUE KEY `uk_mid_us_del` (`metalake_id`, `user_name`, `deleted_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'user metadata'; + +CREATE TABLE IF NOT EXISTS `role_meta` ( + `role_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'role id', + `role_name` VARCHAR(128) NOT NULL COMMENT 'role name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'schema properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'role audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'role current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'role last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'role deleted at', + PRIMARY KEY (`role_id`), + UNIQUE KEY `uk_mid_rn_del` (`metalake_id`, `role_name`, `deleted_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'role metadata'; + +CREATE TABLE IF NOT EXISTS `role_meta_securable_object` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `role_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'role id', + `metadata_object_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'The entity id of securable object', + `type` VARCHAR(128) NOT NULL COMMENT 'securable object type', + `privilege_names` VARCHAR(256) NOT NULL COMMENT 'securable object privilege names', + `privilege_conditions` VARCHAR(256) NOT NULL COMMENT 'securable object 
privilege conditions', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'securable objectcurrent version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'securable object last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'securable object deleted at', + PRIMARY KEY (`id`), + KEY `idx_obj_rid` (`role_id`), + KEY `idx_obj_eid` (`metadata_object_id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'securable object meta'; + +CREATE TABLE IF NOT EXISTS `user_role_rel` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `user_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'user id', + `role_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'role id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'relation audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'relation current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'relation last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'relation deleted at', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_ui_ri_del` (`user_id`, `role_id`, `deleted_at`), + KEY `idx_rid` (`role_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'user role relation'; + +CREATE TABLE IF NOT EXISTS `group_meta` ( + `group_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'group id', + `group_name` VARCHAR(128) NOT NULL COMMENT 'group name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'group audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'group current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'group last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'group deleted at', + PRIMARY KEY (`group_id`), + UNIQUE KEY `uk_mid_gr_del` (`metalake_id`, `group_name`, `deleted_at`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'group metadata'; + +CREATE TABLE IF NOT EXISTS `group_role_rel` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `group_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'group id', + `role_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'role id', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'relation audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'relation current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'relation last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'relation deleted at', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_gi_ri_del` (`group_id`, `role_id`, `deleted_at`), + KEY `idx_rid` (`group_id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'group role relation'; + +CREATE TABLE IF NOT EXISTS `tag_meta` ( + `tag_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'tag id', + `tag_name` VARCHAR(128) NOT NULL COMMENT 'tag name', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `tag_comment` VARCHAR(256) DEFAULT '' COMMENT 'tag comment', + `properties` MEDIUMTEXT DEFAULT NULL COMMENT 'tag properties', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'tag audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'tag current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'tag last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'tag deleted at', + PRIMARY KEY (`tag_id`), + UNIQUE KEY `uk_mi_tn_del` (`metalake_id`, `tag_name`, `deleted_at`) + ) ENGINE=InnoDB 
DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'tag metadata'; + +CREATE TABLE IF NOT EXISTS `tag_relation_meta` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `tag_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'tag id', + `metadata_object_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metadata object id', + `metadata_object_type` VARCHAR(64) NOT NULL COMMENT 'metadata object type', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'tag relation audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'tag relation current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'tag relation last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'tag relation deleted at', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_ti_mi_mo_del` (`tag_id`, `metadata_object_id`, `metadata_object_type`, `deleted_at`), + KEY `idx_tid` (`tag_id`), + KEY `idx_mid` (`metadata_object_id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'tag metadata object relation'; + +CREATE TABLE IF NOT EXISTS `owner_meta` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `owner_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'owner id', + `owner_type` VARCHAR(64) NOT NULL COMMENT 'owner type', + `metadata_object_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metadata object id', + `metadata_object_type` VARCHAR(64) NOT NULL COMMENT 'metadata object type', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'owner relation audit info', + `current_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'owner relation current version', + `last_version` INT UNSIGNED NOT NULL DEFAULT 1 COMMENT 'owner relation last version', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'owner relation deleted at', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_ow_me_del` (`owner_id`, `metadata_object_id`, `metadata_object_type`,`deleted_at`), + KEY `idx_oid` (`owner_id`), + KEY `idx_meid` (`metadata_object_id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'owner relation'; diff --git a/scripts/mysql/upgrade-0.6.0-to-0.7.0-mysql.sql b/scripts/mysql/upgrade-0.6.0-to-0.7.0-mysql.sql new file mode 100644 index 00000000000..0afe5607841 --- /dev/null +++ b/scripts/mysql/upgrade-0.6.0-to-0.7.0-mysql.sql @@ -0,0 +1,41 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file-- +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"). You may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+-- +CREATE TABLE IF NOT EXISTS `table_column_version_info` ( + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `metalake_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'metalake id', + `catalog_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'catalog id', + `schema_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'schema id', + `table_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'table id', + `table_version` INT UNSIGNED NOT NULL COMMENT 'table version', + `column_id` BIGINT(20) UNSIGNED NOT NULL COMMENT 'column id', + `column_name` VARCHAR(128) NOT NULL COMMENT 'column name', + `column_type` TEXT NOT NULL COMMENT 'column type', + `column_comment` VARCHAR(256) DEFAULT '' COMMENT 'column comment', + `column_nullable` TINYINT(1) NOT NULL DEFAULT 1 COMMENT 'column nullable, 0 is not nullable, 1 is nullable', + `column_auto_increment` TINYINT(1) NOT NULL DEFAULT 0 COMMENT 'column auto increment, 0 is not auto increment, 1 is auto increment', + `column_default_value` TEXT DEFAULT NULL COMMENT 'column default value', + `column_op_type` TINYINT(1) NOT NULL COMMENT 'column operation type, 1 is create, 2 is update, 3 is delete', + `deleted_at` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'column deleted at', + `audit_info` MEDIUMTEXT NOT NULL COMMENT 'column audit info', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_tid_ver_cid_del` (`table_id`, `table_version`, `column_id`, `deleted_at`), + KEY `idx_mid` (`metalake_id`), + KEY `idx_cid` (`catalog_id`), + KEY `idx_sid` (`schema_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT 'table column version info'; diff --git a/scripts/postgresql/schema-0.7.0-postgresql.sql b/scripts/postgresql/schema-0.7.0-postgresql.sql index 0ce7a968537..d377c57b556 100644 --- a/scripts/postgresql/schema-0.7.0-postgresql.sql +++ b/scripts/postgresql/schema-0.7.0-postgresql.sql @@ -140,6 +140,48 @@ COMMENT ON COLUMN table_meta.current_version IS 'table current version'; COMMENT ON COLUMN table_meta.last_version IS 'table last version'; COMMENT ON COLUMN table_meta.deleted_at IS 'table deleted at'; +CREATE TABLE IF NOT EXISTS table_column_version_info ( + id BIGINT NOT NULL GENERATED BY DEFAULT AS IDENTITY, + metalake_id BIGINT NOT NULL, + catalog_id BIGINT NOT NULL, + schema_id BIGINT NOT NULL, + table_id BIGINT NOT NULL, + table_version INT NOT NULL, + column_id BIGINT NOT NULL, + column_name VARCHAR(128) NOT NULL, + column_type TEXT NOT NULL, + column_comment VARCHAR(256) DEFAULT '', + column_nullable SMALLINT NOT NULL DEFAULT 1, + column_auto_increment SMALLINT NOT NULL DEFAULT 0, + column_default_value TEXT DEFAULT NULL, + column_op_type SMALLINT NOT NULL, + deleted_at BIGINT NOT NULL DEFAULT 0, + audit_info TEXT NOT NULL, + PRIMARY KEY (id), + UNIQUE (table_id, table_version, column_id, deleted_at) +); +CREATE INDEX idx_mid ON table_column_version_info (metalake_id); +CREATE INDEX idx_cid ON table_column_version_info (catalog_id); +CREATE INDEX idx_sid ON table_column_version_info (schema_id); +COMMENT ON TABLE table_column_version_info IS 'table column version information'; + +COMMENT ON COLUMN table_column_version_info.id IS 'auto increment id'; +COMMENT ON COLUMN table_column_version_info.metalake_id IS 'metalake id'; +COMMENT ON COLUMN table_column_version_info.catalog_id IS 'catalog id'; +COMMENT ON COLUMN table_column_version_info.schema_id IS 'schema id'; +COMMENT ON COLUMN table_column_version_info.table_id IS 'table id'; +COMMENT ON COLUMN table_column_version_info.table_version IS 'table version'; +COMMENT ON COLUMN 
table_column_version_info.column_id IS 'column id'; +COMMENT ON COLUMN table_column_version_info.column_name IS 'column name'; +COMMENT ON COLUMN table_column_version_info.column_type IS 'column type'; +COMMENT ON COLUMN table_column_version_info.column_comment IS 'column comment'; +COMMENT ON COLUMN table_column_version_info.column_nullable IS 'column nullable, 0 is not nullable, 1 is nullable'; +COMMENT ON COLUMN table_column_version_info.column_auto_increment IS 'column auto increment, 0 is not auto increment, 1 is auto increment'; +COMMENT ON COLUMN table_column_version_info.column_default_value IS 'column default value'; +COMMENT ON COLUMN table_column_version_info.column_op_type IS 'column operation type, 1 is create, 2 is update, 3 is delete'; +COMMENT ON COLUMN table_column_version_info.deleted_at IS 'column deleted at'; +COMMENT ON COLUMN table_column_version_info.audit_info IS 'column audit info'; + CREATE TABLE IF NOT EXISTS fileset_meta ( fileset_id BIGINT NOT NULL, diff --git a/server/src/main/java/org/apache/gravitino/server/GravitinoServer.java b/server/src/main/java/org/apache/gravitino/server/GravitinoServer.java index 3232c293bcd..e383c65b7a4 100644 --- a/server/src/main/java/org/apache/gravitino/server/GravitinoServer.java +++ b/server/src/main/java/org/apache/gravitino/server/GravitinoServer.java @@ -48,6 +48,7 @@ import org.apache.gravitino.server.web.ui.WebUIFilter; import org.apache.gravitino.tag.TagManager; import org.glassfish.hk2.utilities.binding.AbstractBinder; +import org.glassfish.jersey.CommonProperties; import org.glassfish.jersey.jackson.JacksonFeature; import org.glassfish.jersey.server.ResourceConfig; import org.glassfish.jersey.servlet.ServletContainer; @@ -119,6 +120,7 @@ protected void configure() { register(JsonParseExceptionMapper.class); register(JsonMappingExceptionMapper.class); register(ObjectMapperProvider.class).register(JacksonFeature.class); + property(CommonProperties.JSON_JACKSON_DISABLED_MODULES, "DefaultScalaModule"); if (!enableAuthorization) { register(AccessControlNotAllowedFilter.class); diff --git a/server/src/main/java/org/apache/gravitino/server/web/Utils.java b/server/src/main/java/org/apache/gravitino/server/web/Utils.java index 7226eb363e3..86d2f8d1bad 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/Utils.java +++ b/server/src/main/java/org/apache/gravitino/server/web/Utils.java @@ -148,6 +148,13 @@ public static Response unsupportedOperation(String message, Throwable throwable) .build(); } + public static Response forbidden(String message, Throwable throwable) { + return Response.status(Response.Status.FORBIDDEN) + .entity(ErrorResponse.forbidden(message, throwable)) + .type(MediaType.APPLICATION_JSON) + .build(); + } + public static Response doAs( HttpServletRequest httpRequest, PrivilegedExceptionAction action) throws Exception { UserPrincipal principal = diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/ExceptionHandlers.java b/server/src/main/java/org/apache/gravitino/server/web/rest/ExceptionHandlers.java index 4748cf23d6e..dd8c21ab4cb 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/ExceptionHandlers.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/ExceptionHandlers.java @@ -26,6 +26,7 @@ import org.apache.gravitino.exceptions.CatalogAlreadyExistsException; import org.apache.gravitino.exceptions.ConnectionFailedException; import org.apache.gravitino.exceptions.FilesetAlreadyExistsException; +import 
org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.exceptions.GroupAlreadyExistsException; import org.apache.gravitino.exceptions.MetalakeAlreadyExistsException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; @@ -212,6 +213,9 @@ public Response handle(OperationType op, String table, String schema, Exception } else if (e instanceof UnsupportedOperationException) { return Utils.unsupportedOperation(errorMsg, e); + } else if (e instanceof ForbiddenException) { + return Utils.forbidden(errorMsg, e); + } else { return super.handle(op, table, schema, e); } @@ -250,6 +254,9 @@ public Response handle(OperationType op, String schema, String catalog, Exceptio } else if (e instanceof UnsupportedOperationException) { return Utils.unsupportedOperation(errorMsg, e); + } else if (e instanceof ForbiddenException) { + return Utils.forbidden(errorMsg, e); + } else { return super.handle(op, schema, catalog, e); } @@ -282,6 +289,9 @@ public Response handle(OperationType op, String catalog, String metalake, Except } else if (e instanceof NotFoundException) { return Utils.notFound(errorMsg, e); + } else if (e instanceof ForbiddenException) { + return Utils.forbidden(errorMsg, e); + } else if (e instanceof CatalogAlreadyExistsException) { return Utils.alreadyExists(errorMsg, e); @@ -347,6 +357,9 @@ public Response handle(OperationType op, String fileset, String schema, Exceptio } else if (e instanceof FilesetAlreadyExistsException) { return Utils.alreadyExists(errorMsg, e); + } else if (e instanceof ForbiddenException) { + return Utils.forbidden(errorMsg, e); + } else { return super.handle(op, fileset, schema, e); } @@ -449,6 +462,9 @@ public Response handle(OperationType op, String role, String metalake, Exception } else if (e instanceof RoleAlreadyExistsException) { return Utils.alreadyExists(errorMsg, e); + } else if (e instanceof ForbiddenException) { + return Utils.forbidden(errorMsg, e); + } else { return super.handle(op, role, metalake, e); } @@ -480,6 +496,8 @@ public Response handle(OperationType op, String topic, String schema, Exception } else if (e instanceof TopicAlreadyExistsException) { return Utils.alreadyExists(errorMsg, e); + } else if (e instanceof ForbiddenException) { + return Utils.forbidden(errorMsg, e); } else { return super.handle(op, topic, schema, e); } diff --git a/settings.gradle.kts b/settings.gradle.kts index d219a0cdf72..6be67f4dd80 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -71,3 +71,4 @@ project(":spark-connector:spark-runtime-3.5").projectDir = file("spark-connector include("web:web", "web:integration-test") include("docs") include("integration-test-common") +include(":bundles:gcp-bundle") diff --git a/spark-connector/spark-common/build.gradle.kts b/spark-connector/spark-common/build.gradle.kts index 2d875204628..7f3c66aa6e6 100644 --- a/spark-connector/spark-common/build.gradle.kts +++ b/spark-connector/spark-common/build.gradle.kts @@ -91,7 +91,7 @@ dependencies { exclude("com.google.code.findbugs", "sr305") exclude("com.tdunning", "json") exclude("com.zaxxer", "HikariCP") - exclude("io.dropwizard.metricss") + exclude("io.dropwizard.metrics") exclude("javax.transaction", "transaction-api") exclude("org.apache.avro") exclude("org.apache.curator") diff --git a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/SparkEnvIT.java b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/SparkEnvIT.java index 30e6ed44b3f..b534a9772f7 
100644 --- a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/SparkEnvIT.java +++ b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/SparkEnvIT.java @@ -30,7 +30,6 @@ import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; import org.apache.gravitino.server.web.JettyServerConfig; import org.apache.gravitino.spark.connector.GravitinoSparkConfig; import org.apache.gravitino.spark.connector.iceberg.IcebergPropertiesConstants; @@ -84,7 +83,7 @@ void startUp() throws Exception { initIcebergRestServiceEnv(); } // Start Gravitino server - AbstractIT.startIntegrationTest(); + super.startIntegrationTest(); initHdfsFileSystem(); initGravitinoEnv(); initMetalakeAndCatalogs(); @@ -107,7 +106,7 @@ void stop() throws IOException, InterruptedException { if (sparkSession != null) { sparkSession.close(); } - AbstractIT.stopIntegrationTest(); + super.stopIntegrationTest(); } // AbstractIT#startIntegrationTest() is static, so we couldn't update the value of @@ -115,14 +114,14 @@ void stop() throws IOException, InterruptedException { // if startIntegrationTest() is auto invoked by Junit. So here we override // startIntegrationTest() to disable the auto invoke by junit. @BeforeAll - public static void startIntegrationTest() {} + public void startIntegrationTest() {} @AfterAll - public static void stopIntegrationTest() {} + public void stopIntegrationTest() {} private void initMetalakeAndCatalogs() { - AbstractIT.client.createMetalake(metalakeName, "", Collections.emptyMap()); - GravitinoMetalake metalake = AbstractIT.client.loadMetalake(metalakeName); + client.createMetalake(metalakeName, "", Collections.emptyMap()); + GravitinoMetalake metalake = client.loadMetalake(metalakeName); Map properties = getCatalogConfigs(); if (lakeHouseIcebergProvider.equalsIgnoreCase(getProvider())) { properties.put(SPARK_PROPERTY_PREFIX + ICEBERG_CATALOG_CACHE_ENABLED, "true"); @@ -133,7 +132,7 @@ private void initMetalakeAndCatalogs() { private void initGravitinoEnv() { // Gravitino server is already started by AbstractIT, just construct gravitinoUrl - int gravitinoPort = AbstractIT.getGravitinoServerPort(); + int gravitinoPort = getGravitinoServerPort(); gravitinoUri = String.format("http://127.0.0.1:%d", gravitinoPort); icebergRestServiceUri = getIcebergRestServiceUri(); } @@ -173,7 +172,7 @@ private void initIcebergRestServiceEnv() { + "." 
+ IcebergPropertiesConstants.GRAVITINO_ICEBERG_CATALOG_WAREHOUSE, warehouse); - AbstractIT.registerCustomConfigs(icebergRestServiceConfigs); + registerCustomConfigs(icebergRestServiceConfigs); } private void initHdfsFileSystem() { diff --git a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/hive/SparkHiveCatalogIT.java b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/hive/SparkHiveCatalogIT.java index 5680e2e30c2..c543d82819e 100644 --- a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/hive/SparkHiveCatalogIT.java +++ b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/hive/SparkHiveCatalogIT.java @@ -36,12 +36,10 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class SparkHiveCatalogIT extends SparkCommonIT { @Override diff --git a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogHiveBackendIT.java b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogHiveBackendIT.java index 428f680bfb9..344b538cc51 100644 --- a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogHiveBackendIT.java +++ b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogHiveBackendIT.java @@ -22,11 +22,9 @@ import java.util.Map; import org.apache.gravitino.spark.connector.iceberg.IcebergPropertiesConstants; import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.TestInstance; /** This class use Apache Iceberg HiveCatalog for backend catalog. */ @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class SparkIcebergCatalogHiveBackendIT extends SparkIcebergCatalogIT { @Override diff --git a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogRestBackendIT.java b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogRestBackendIT.java index 2e2ad1607c6..a10c82e0e6d 100644 --- a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogRestBackendIT.java +++ b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/iceberg/SparkIcebergCatalogRestBackendIT.java @@ -22,13 +22,11 @@ import java.util.Map; import org.apache.gravitino.spark.connector.iceberg.IcebergPropertiesConstants; import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.TestInstance; /** * This class use Apache Iceberg RESTCatalog for test, and the real backend catalog is HiveCatalog. 
*/ @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class SparkIcebergCatalogRestBackendIT extends SparkIcebergCatalogIT { @Override diff --git a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/sql/SparkQueryRunner.java b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/sql/SparkQueryRunner.java index 982035e8b62..02301b362eb 100644 --- a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/sql/SparkQueryRunner.java +++ b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/sql/SparkQueryRunner.java @@ -36,7 +36,7 @@ import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.spark.connector.GravitinoSparkConfig; import org.apache.gravitino.spark.connector.iceberg.IcebergPropertiesConstants; import org.apache.gravitino.spark.connector.plugin.GravitinoSparkPlugin; @@ -61,6 +61,7 @@ public class SparkQueryRunner { private Map catalogs = new HashMap<>(); private boolean isGravitinoEnvSetup; private String dataDir; + private BaseIT baseIT; private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); @@ -83,6 +84,7 @@ public SparkQueryRunner(SparkTestConfig sparkTestConfig) { } initSparkEnv(); + baseIT = new BaseIT(); catalogs.put(CatalogType.HIVE, HIVE_CATALOG_NAME); catalogs.put(CatalogType.ICEBERG, ICEBERG_CATALOG_NAME); catalogs.put(CatalogType.UNKNOWN, HIVE_CATALOG_NAME); @@ -134,12 +136,12 @@ private void setupGravitinoEnv() throws Exception { HiveContainer.HDFS_DEFAULTFS_PORT); // Start Gravitino server - AbstractIT.startIntegrationTest(); - int gravitinoPort = AbstractIT.getGravitinoServerPort(); + baseIT.startIntegrationTest(); + int gravitinoPort = baseIT.getGravitinoServerPort(); this.gravitinoUri = String.format("http://127.0.0.1:%d", gravitinoPort); // Init metalake and catalog - GravitinoAdminClient client = AbstractIT.getGravitinoClient(); + GravitinoAdminClient client = baseIT.getGravitinoClient(); client.createMetalake(metalakeName, "", Collections.emptyMap()); GravitinoMetalake metalake = client.loadMetalake(metalakeName); metalake.createCatalog( @@ -177,7 +179,7 @@ private Map getIcebergCatalogConfigs(String hiveMetastoreUri) { } private void closeGravitinoEnv() throws Exception { - AbstractIT.stopIntegrationTest(); + baseIT.stopIntegrationTest(); } private void writeQueryOutput(Path outputFile, List queryOutputs) diff --git a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/sql/SparkSQLRegressionTest.java b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/sql/SparkSQLRegressionTest.java index dea3dd36ce1..e6d517b8f66 100644 --- a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/sql/SparkSQLRegressionTest.java +++ b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/sql/SparkSQLRegressionTest.java @@ -32,7 +32,6 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import
org.junit.jupiter.api.TestInstance; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,7 +42,6 @@ * -PenableSparkSQLITs */ @Tag("gravitino-docker-test") -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class SparkSQLRegressionTest { private static final Logger LOG = LoggerFactory.getLogger(SparkSQLRegressionTest.class); private static final String SPARK_TEST_CONFIG_FILE = "configFile"; diff --git a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/util/SparkUtilIT.java b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/util/SparkUtilIT.java index 05637c0ce39..646f414841b 100644 --- a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/util/SparkUtilIT.java +++ b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/integration/test/util/SparkUtilIT.java @@ -28,7 +28,7 @@ import java.util.TimeZone; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.spark.sql.AnalysisException; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; @@ -44,7 +44,7 @@ * *
<p>
Referred from spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/SparkTestBase.java */ -public abstract class SparkUtilIT extends AbstractIT { +public abstract class SparkUtilIT extends BaseIT { protected static final String NULL_STRING = "NULL"; protected abstract SparkSession getSparkSession(); diff --git a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/utils/TestConnectorUtil.java b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/utils/TestConnectorUtil.java index 36c8ef7770b..32250340867 100644 --- a/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/utils/TestConnectorUtil.java +++ b/spark-connector/spark-common/src/test/java/org/apache/gravitino/spark/connector/utils/TestConnectorUtil.java @@ -23,9 +23,7 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class TestConnectorUtil { @Test diff --git a/spark-connector/v3.3/spark/build.gradle.kts b/spark-connector/v3.3/spark/build.gradle.kts index 2a8ed3f97ce..c4c417d62ef 100644 --- a/spark-connector/v3.3/spark/build.gradle.kts +++ b/spark-connector/v3.3/spark/build.gradle.kts @@ -98,7 +98,7 @@ dependencies { exclude("com.tdunning", "json") exclude("com.zaxxer", "HikariCP") exclude("com.sun.jersey") - exclude("io.dropwizard.metricss") + exclude("io.dropwizard.metrics") exclude("javax.transaction", "transaction-api") exclude("org.apache.avro") exclude("org.apache.curator") diff --git a/spark-connector/v3.4/spark/build.gradle.kts b/spark-connector/v3.4/spark/build.gradle.kts index bf51bc8690c..f3308fca34b 100644 --- a/spark-connector/v3.4/spark/build.gradle.kts +++ b/spark-connector/v3.4/spark/build.gradle.kts @@ -98,7 +98,7 @@ dependencies { exclude("com.tdunning", "json") exclude("com.zaxxer", "HikariCP") exclude("com.sun.jersey") - exclude("io.dropwizard.metricss") + exclude("io.dropwizard.metrics") exclude("javax.transaction", "transaction-api") exclude("org.apache.avro") exclude("org.apache.curator") diff --git a/spark-connector/v3.5/spark/build.gradle.kts b/spark-connector/v3.5/spark/build.gradle.kts index 170a4cc63f5..7b8cc8447b7 100644 --- a/spark-connector/v3.5/spark/build.gradle.kts +++ b/spark-connector/v3.5/spark/build.gradle.kts @@ -100,7 +100,7 @@ dependencies { exclude("com.tdunning", "json") exclude("com.zaxxer", "HikariCP") exclude("com.sun.jersey") - exclude("io.dropwizard.metricss") + exclude("io.dropwizard.metrics") exclude("javax.transaction", "transaction-api") exclude("org.apache.avro") exclude("org.apache.curator") diff --git a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoConnectorIT.java b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoConnectorIT.java index 589821e6faa..162fd10b746 100644 --- a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoConnectorIT.java +++ b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoConnectorIT.java @@ -37,7 +37,7 @@ import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; import org.apache.gravitino.integration.test.container.TrinoContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; +import 
org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; @@ -68,7 +68,7 @@ @Disabled @Deprecated @Tag("gravitino-docker-test") -public class TrinoConnectorIT extends AbstractIT { +public class TrinoConnectorIT extends BaseIT { public static final Logger LOG = LoggerFactory.getLogger(TrinoConnectorIT.class); private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); @@ -88,7 +88,7 @@ public class TrinoConnectorIT extends AbstractIT { private static Catalog catalog; @BeforeAll - public static void startDockerContainer() throws TException, InterruptedException { + public void startDockerContainer() throws TException, InterruptedException { String trinoConfDir = System.getenv("TRINO_CONF_DIR"); containerSuite.startHiveContainer(); @@ -1379,7 +1379,7 @@ void testDropCatalogAndCreateAgain() { } } - private static void createMetalake() { + private void createMetalake() { GravitinoMetalake[] gravitinoMetalakes = client.listMetalakes(); Assertions.assertEquals(0, gravitinoMetalakes.length); diff --git a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryIT.java b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryIT.java index c737e762fab..3ec2b6d46e7 100644 --- a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryIT.java +++ b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryIT.java @@ -46,10 +46,12 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Tag("gravitino-docker-test") +@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class TrinoQueryIT extends TrinoQueryITBase { private static final Logger LOG = LoggerFactory.getLogger(TrinoQueryIT.class); @@ -67,6 +69,8 @@ public class TrinoQueryIT extends TrinoQueryITBase { static Set ciTestsets = new HashSet<>(); + static TrinoQueryITBase trinoQueryITBase; + static { testsetsDir = TrinoQueryIT.class.getClassLoader().getResource("trino-ci-testset").getPath(); testsetsDir = ITUtils.joinPath(testsetsDir, "testsets"); @@ -79,8 +83,9 @@ public class TrinoQueryIT extends TrinoQueryITBase { } @BeforeAll - public static void setup() throws Exception { - TrinoQueryITBase.setup(); + public void setup() throws Exception { + trinoQueryITBase = new TrinoQueryITBase(); + trinoQueryITBase.setup(); cleanupTestEnv(); queryParams.put("mysql_uri", mysqlUri); diff --git a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryITBase.java b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryITBase.java index 1c8e7559fd4..14558237786 100644 --- a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryITBase.java +++ b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryITBase.java @@ -35,7 +35,7 @@ import org.apache.gravitino.exceptions.RESTException; import org.apache.gravitino.integration.test.container.ContainerSuite; import 
org.apache.gravitino.integration.test.container.TrinoITContainers; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.apache.gravitino.rel.TableCatalog; import org.junit.jupiter.api.Assertions; import org.slf4j.Logger; @@ -68,14 +68,17 @@ public class TrinoQueryITBase { protected static final String metalakeName = "test"; protected static GravitinoMetalake metalake; - private static void setEnv() throws Exception { + private static BaseIT baseIT; + + private void setEnv() throws Exception { + baseIT = new BaseIT(); if (autoStart) { - AbstractIT.startIntegrationTest(); - gravitinoClient = AbstractIT.getGravitinoClient(); - gravitinoUri = String.format("http://127.0.0.1:%d", AbstractIT.getGravitinoServerPort()); + baseIT.startIntegrationTest(); + gravitinoClient = baseIT.getGravitinoClient(); + gravitinoUri = String.format("http://127.0.0.1:%d", baseIT.getGravitinoServerPort()); trinoITContainers = ContainerSuite.getTrinoITContainers(); - trinoITContainers.launch(AbstractIT.getGravitinoServerPort()); + trinoITContainers.launch(baseIT.getGravitinoServerPort()); trinoUri = trinoITContainers.getTrinoUri(); hiveMetastoreUri = trinoITContainers.getHiveMetastoreUri(); @@ -84,16 +87,16 @@ private static void setEnv() throws Exception { postgresqlUri = trinoITContainers.getPostgresqlUri(); } else if (autoStartGravitino) { - AbstractIT.startIntegrationTest(); - gravitinoClient = AbstractIT.getGravitinoClient(); - gravitinoUri = String.format("http://127.0.0.1:%d", AbstractIT.getGravitinoServerPort()); + baseIT.startIntegrationTest(); + gravitinoClient = baseIT.getGravitinoClient(); + gravitinoUri = String.format("http://127.0.0.1:%d", baseIT.getGravitinoServerPort()); } else { gravitinoClient = GravitinoAdminClient.builder(gravitinoUri).build(); } } - public static void setup() throws Exception { + public void setup() throws Exception { if (started) { return; } @@ -113,7 +116,7 @@ public static void cleanup() { try { if (autoStart) { if (trinoITContainers != null) trinoITContainers.shutdown(); - AbstractIT.stopIntegrationTest(); + baseIT.stopIntegrationTest(); } } catch (Exception e) { LOG.error("Error in cleanup", e); diff --git a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryTestTool.java b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryTestTool.java index 676bcb62898..dee82d6a3c0 100644 --- a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryTestTool.java +++ b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryTestTool.java @@ -217,8 +217,8 @@ public static void main(String[] args) throws Exception { TrinoQueryITBase.autoStart = autoStart; TrinoQueryITBase.autoStartGravitino = autoStartGravitino; - TrinoQueryIT.setup(); TrinoQueryIT testerRunner = new TrinoQueryIT(); + testerRunner.setup(); if (commandLine.hasOption("gen_output")) { String catalogFileName = "catalog_" + catalog + "_prepare.sql"; diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageDorisTest.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageDorisTest.java index 1fcd754b9fc..cac8e5a38cc 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageDorisTest.java 
+++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageDorisTest.java @@ -29,10 +29,9 @@ import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.DorisContainer; -import org.apache.gravitino.integration.test.util.AbstractIT; import org.apache.gravitino.integration.test.web.ui.pages.CatalogsPage; import org.apache.gravitino.integration.test.web.ui.pages.MetalakePage; -import org.apache.gravitino.integration.test.web.ui.utils.AbstractWebIT; +import org.apache.gravitino.integration.test.web.ui.utils.BaseWebIT; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.expressions.NamedReference; import org.apache.gravitino.rel.expressions.distributions.Distributions; @@ -48,9 +47,9 @@ @Tag("gravitino-docker-test") @TestMethodOrder(MethodOrderer.OrderAnnotation.class) -public class CatalogsPageDorisTest extends AbstractWebIT { - MetalakePage metalakePage = new MetalakePage(); - CatalogsPage catalogsPage = new CatalogsPage(); +public class CatalogsPageDorisTest extends BaseWebIT { + private MetalakePage metalakePage; + private CatalogsPage catalogsPage; private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); protected static GravitinoAdminClient gravitinoClient; @@ -77,9 +76,9 @@ public class CatalogsPageDorisTest extends AbstractWebIT { private static final String PROPERTIES_VALUE1 = "val1"; @BeforeAll - public static void before() throws Exception { - gravitinoClient = AbstractIT.getGravitinoClient(); - gravitinoUri = String.format("http://127.0.0.1:%d", AbstractIT.getGravitinoServerPort()); + public void before() throws Exception { + gravitinoClient = getGravitinoClient(); + gravitinoUri = String.format("http://127.0.0.1:%d", getGravitinoServerPort()); containerSuite.startDorisContainer(); @@ -89,6 +88,9 @@ public static void before() throws Exception { containerSuite.getDorisContainer().getContainerIpAddress(), DorisContainer.FE_MYSQL_PORT); LOG.info("Doris jdbc url: {}", dorisJdbcConnectionUri); + + metalakePage = new MetalakePage(driver); + catalogsPage = new CatalogsPage(driver); } /** diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageKafkaTest.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageKafkaTest.java index 5eea61c7a35..8af7277e20e 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageKafkaTest.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageKafkaTest.java @@ -27,10 +27,9 @@ import org.apache.gravitino.client.GravitinoAdminClient; import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.integration.test.container.ContainerSuite; -import org.apache.gravitino.integration.test.util.AbstractIT; import org.apache.gravitino.integration.test.web.ui.pages.CatalogsPage; import org.apache.gravitino.integration.test.web.ui.pages.MetalakePage; -import org.apache.gravitino.integration.test.web.ui.utils.AbstractWebIT; +import org.apache.gravitino.integration.test.web.ui.utils.BaseWebIT; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; @@ -41,9 +40,9 @@ @Tag("gravitino-docker-test") @TestMethodOrder(MethodOrderer.OrderAnnotation.class) -public class CatalogsPageKafkaTest extends 
AbstractWebIT { - MetalakePage metalakePage = new MetalakePage(); - CatalogsPage catalogsPage = new CatalogsPage(); +public class CatalogsPageKafkaTest extends BaseWebIT { + private MetalakePage metalakePage; + private CatalogsPage catalogsPage; private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); protected static GravitinoAdminClient gravitinoClient; @@ -63,15 +62,18 @@ public class CatalogsPageKafkaTest extends AbstractWebIT { public static final int DEFAULT_BROKER_PORT = 9092; @BeforeAll - public static void before() throws Exception { - gravitinoClient = AbstractIT.getGravitinoClient(); + public void before() throws Exception { + gravitinoClient = getGravitinoClient(); - gravitinoUri = String.format("http://127.0.0.1:%d", AbstractIT.getGravitinoServerPort()); + gravitinoUri = String.format("http://127.0.0.1:%d", getGravitinoServerPort()); containerSuite.startKafkaContainer(); String address = containerSuite.getKafkaContainer().getContainerIpAddress(); kafkaUri = String.format("%s:%d", address, DEFAULT_BROKER_PORT); + + metalakePage = new MetalakePage(driver); + catalogsPage = new CatalogsPage(driver); } /** diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java index 75d385ece8e..31e50e65725 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java @@ -33,10 +33,9 @@ import org.apache.gravitino.file.Fileset; import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.TrinoITContainers; -import org.apache.gravitino.integration.test.util.AbstractIT; import org.apache.gravitino.integration.test.web.ui.pages.CatalogsPage; import org.apache.gravitino.integration.test.web.ui.pages.MetalakePage; -import org.apache.gravitino.integration.test.web.ui.utils.AbstractWebIT; +import org.apache.gravitino.integration.test.web.ui.utils.BaseWebIT; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.expressions.NamedReference; import org.apache.gravitino.rel.expressions.distributions.Distribution; @@ -60,9 +59,9 @@ @Tag("gravitino-docker-test") @TestMethodOrder(MethodOrderer.OrderAnnotation.class) -public class CatalogsPageTest extends AbstractWebIT { - MetalakePage metalakePage = new MetalakePage(); - CatalogsPage catalogsPage = new CatalogsPage(); +public class CatalogsPageTest extends BaseWebIT { + private MetalakePage metalakePage; + private CatalogsPage catalogsPage; protected static TrinoITContainers trinoITContainers; protected static GravitinoAdminClient gravitinoClient; @@ -115,18 +114,21 @@ public class CatalogsPageTest extends AbstractWebIT { private static String defaultBaseLocation; @BeforeAll - public static void before() throws Exception { - gravitinoClient = AbstractIT.getGravitinoClient(); + public void before() throws Exception { + gravitinoClient = getGravitinoClient(); - gravitinoUri = String.format("http://127.0.0.1:%d", AbstractIT.getGravitinoServerPort()); + gravitinoUri = String.format("http://127.0.0.1:%d", getGravitinoServerPort()); trinoITContainers = ContainerSuite.getTrinoITContainers(); - trinoITContainers.launch(AbstractIT.getGravitinoServerPort()); + trinoITContainers.launch(getGravitinoServerPort()); hiveMetastoreUri = 
trinoITContainers.getHiveMetastoreUri(); hdfsUri = trinoITContainers.getHdfsUri(); mysqlUri = trinoITContainers.getMysqlUri(); postgresqlUri = trinoITContainers.getPostgresqlUri(); + + metalakePage = new MetalakePage(driver); + catalogsPage = new CatalogsPage(driver); } /** diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/MetalakePageTest.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/MetalakePageTest.java index d288543cc1f..fc27dc08b48 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/MetalakePageTest.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/MetalakePageTest.java @@ -19,8 +19,9 @@ package org.apache.gravitino.integration.test.web.ui; import org.apache.gravitino.integration.test.web.ui.pages.MetalakePage; -import org.apache.gravitino.integration.test.web.ui.utils.AbstractWebIT; +import org.apache.gravitino.integration.test.web.ui.utils.BaseWebIT; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; @@ -29,7 +30,7 @@ @DisabledIfSystemProperty(named = "testMode", matches = "embedded") @TestMethodOrder(MethodOrderer.OrderAnnotation.class) -public class MetalakePageTest extends AbstractWebIT { +public class MetalakePageTest extends BaseWebIT { private static final String WEB_TITLE = "Gravitino"; private static final String METALAKE_NAME = "metalake_name"; private static final String EDITED_METALAKE_NAME = METALAKE_NAME + "_edited"; @@ -38,7 +39,12 @@ public class MetalakePageTest extends AbstractWebIT { private static final String FOOTER_LINK_LICENSE = "https://github.com/apache/gravitino/blob/main/LICENSE"; private static final String FOOTER_LINK_SUPPORT = "https://github.com/apache/gravitino/issues"; - MetalakePage metalakePage = new MetalakePage(); + private MetalakePage metalakePage; + + @BeforeAll + void init() { + metalakePage = new MetalakePage(driver); + } // Create a metalake by name, set the default comment and properties. 
public void createMetalakeAction(String name) throws InterruptedException { diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/CatalogsPage.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/CatalogsPage.java index 1df4f41729c..222cbd22714 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/CatalogsPage.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/CatalogsPage.java @@ -26,17 +26,18 @@ import java.util.Arrays; import java.util.List; import java.util.Objects; -import org.apache.gravitino.integration.test.web.ui.utils.AbstractWebIT; +import org.apache.gravitino.integration.test.web.ui.utils.BaseWebIT; import org.openqa.selenium.By; import org.openqa.selenium.JavascriptExecutor; import org.openqa.selenium.Keys; +import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebElement; import org.openqa.selenium.support.FindBy; import org.openqa.selenium.support.PageFactory; import org.openqa.selenium.support.ui.ExpectedConditions; import org.openqa.selenium.support.ui.WebDriverWait; -public class CatalogsPage extends AbstractWebIT { +public class CatalogsPage extends BaseWebIT { @FindBy(xpath = "//*[@data-refer='back-home-btn']") public WebElement backHomeBtn; @@ -115,7 +116,8 @@ public class CatalogsPage extends AbstractWebIT { @FindBy(xpath = "//ul[@aria-labelledby='select-catalog-type']") public WebElement catalogTypeList; - public CatalogsPage() { + public CatalogsPage(WebDriver driver) { + this.driver = driver; PageFactory.initElements(driver, this); } diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/MetalakePage.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/MetalakePage.java index 95153495b1c..419589f5ad9 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/MetalakePage.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/MetalakePage.java @@ -22,17 +22,18 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import org.apache.gravitino.integration.test.web.ui.utils.AbstractWebIT; +import org.apache.gravitino.integration.test.web.ui.utils.BaseWebIT; import org.openqa.selenium.By; import org.openqa.selenium.JavascriptExecutor; import org.openqa.selenium.Keys; +import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebElement; import org.openqa.selenium.support.FindBy; import org.openqa.selenium.support.PageFactory; import org.openqa.selenium.support.ui.ExpectedConditions; import org.openqa.selenium.support.ui.WebDriverWait; -public class MetalakePage extends AbstractWebIT { +public class MetalakePage extends BaseWebIT { private static final String PAGE_TITLE = "Metalakes"; @FindBy( @@ -109,7 +110,8 @@ public class MetalakePage extends AbstractWebIT { @FindBy(xpath = "//a[@data-refer='footer-link-support']") public WebElement footerLinkSupport; - public MetalakePage() { + public MetalakePage(WebDriver driver) { + this.driver = driver; PageFactory.initElements(driver, this); } diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/AbstractWebIT.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/BaseWebIT.java similarity index 92% rename from 
web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/AbstractWebIT.java rename to web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/BaseWebIT.java index 9b3b6945d4f..8ccc5132621 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/AbstractWebIT.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/BaseWebIT.java @@ -21,7 +21,7 @@ import com.google.common.base.Function; import java.time.Duration; import java.time.temporal.ChronoUnit; -import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.BaseIT; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -41,13 +41,13 @@ import org.slf4j.LoggerFactory; // AbstractWebIT provides a WebDriver instance for WEB UI tests. -public class AbstractWebIT extends AbstractIT { - protected static final Logger LOG = LoggerFactory.getLogger(AbstractWebIT.class); - protected static WebDriver driver; +public class BaseWebIT extends BaseIT { + protected static final Logger LOG = LoggerFactory.getLogger(BaseWebIT.class); + protected WebDriver driver; // https://www.selenium.dev/documentation/webdriver/waits/#implicit-waits protected static final long MAX_IMPLICIT_WAIT = 30; - protected static final long MAX_TIMEOUT = 20; + protected static final long MAX_TIMEOUT = 60; protected static final long EACH_TEST_SLEEP_MILLIS = 1_000; protected static final long ACTION_SLEEP_MILLIS = 1_000; @@ -113,12 +113,12 @@ public void beforeEachTest() { } @BeforeAll - public static void startUp() { + public void startUp() { driver = WebDriverManager.getWebDriver(getGravitinoServerPort()); } @AfterAll - public static void tearDown() { + public void tearDown() { driver.quit(); } } diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/WebDriverManager.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/WebDriverManager.java index ab241ddf429..22cd9c53a1d 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/WebDriverManager.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/utils/WebDriverManager.java @@ -36,12 +36,12 @@ public static WebDriver getWebDriver(int port) { WebDriverProvider provide = new ChromeWebDriverProvider(); WebDriver driver = generateWebDriver(provide); - driver.manage().timeouts().implicitlyWait(AbstractWebIT.MAX_IMPLICIT_WAIT, TimeUnit.SECONDS); + driver.manage().timeouts().implicitlyWait(BaseWebIT.MAX_IMPLICIT_WAIT, TimeUnit.SECONDS); driver.get(url); // wait for webpage load compiled. try { - (new WebDriverWait(driver, AbstractWebIT.MAX_TIMEOUT)) + (new WebDriverWait(driver, BaseWebIT.MAX_TIMEOUT)) .until( d -> { String gravitinoVersion = d.findElement(By.id("gravitino_version")).getText();
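// The recurring change in the test sources above is the move from AbstractIT's
// static JUnit lifecycle to BaseIT instance methods. A minimal sketch of the
// pattern, assuming BaseIT itself declares the per-class test lifecycle (the
// BaseIT source is not part of this diff):
//
//   import org.junit.jupiter.api.AfterAll;
//   import org.junit.jupiter.api.BeforeAll;
//   import org.junit.jupiter.api.TestInstance;
//
//   @TestInstance(TestInstance.Lifecycle.PER_CLASS)
//   public class BaseIT {
//     @BeforeAll
//     public void startIntegrationTest() throws Exception { /* start server, create client */ }
//
//     @AfterAll
//     public void stopIntegrationTest() throws Exception { /* stop server, release resources */ }
//   }
//
// With Lifecycle.PER_CLASS, JUnit 5 accepts non-static @BeforeAll/@AfterAll
// methods, which is what lets subclasses such as SparkEnvIT override
// startIntegrationTest() to suppress the automatic invocation, and what turns
// fields like the web driver into per-instance state that MetalakePage and
// CatalogsPage now receive through their constructors.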