diff --git a/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/container/ContainerSuite.java b/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/container/ContainerSuite.java index c2bd66a9ede..0eeb4962a29 100644 --- a/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/container/ContainerSuite.java +++ b/integration-test-common/src/test/java/com/datastrato/gravitino/integration/test/container/ContainerSuite.java @@ -154,7 +154,8 @@ public void startKafkaContainer() { if (kafkaContainer == null) { synchronized (ContainerSuite.class) { if (kafkaContainer == null) { - KafkaContainer container = closer.register(KafkaContainer.builder().build()); + KafkaContainer.Builder builder = KafkaContainer.builder().withNetwork(network); + KafkaContainer container = closer.register(builder.build()); try { container.start(); } catch (Exception e) { diff --git a/integration-test/build.gradle.kts b/integration-test/build.gradle.kts index 0bcaf0cbf0c..c9b68e75a8d 100644 --- a/integration-test/build.gradle.kts +++ b/integration-test/build.gradle.kts @@ -155,6 +155,7 @@ tasks.test { // Gravitino CI Docker image environment("GRAVITINO_CI_HIVE_DOCKER_IMAGE", "datastrato/gravitino-ci-hive:0.1.10") environment("GRAVITINO_CI_TRINO_DOCKER_IMAGE", "datastrato/gravitino-ci-trino:0.1.5") + environment("GRAVITINO_CI_KAFKA_DOCKER_IMAGE", "apache/kafka:3.7.0") copy { from("${project.rootDir}/dev/docker/trino/conf") diff --git a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/CatalogsPageKafkaTest.java b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/CatalogsPageKafkaTest.java new file mode 100644 index 00000000000..59badd669d2 --- /dev/null +++ b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/CatalogsPageKafkaTest.java @@ -0,0 +1,222 @@ +/* + * Copyright 2024 Datastrato Pvt Ltd. + * This software is licensed under the Apache License version 2. 
+ */ + +package com.datastrato.gravitino.integration.test.web.ui; + +import com.datastrato.gravitino.Catalog; +import com.datastrato.gravitino.NameIdentifier; +import com.datastrato.gravitino.client.GravitinoAdminClient; +import com.datastrato.gravitino.client.GravitinoMetalake; +import com.datastrato.gravitino.integration.test.container.ContainerSuite; +import com.datastrato.gravitino.integration.test.util.AbstractIT; +import com.datastrato.gravitino.integration.test.web.ui.pages.CatalogsPage; +import com.datastrato.gravitino.integration.test.web.ui.pages.MetalakePage; +import com.datastrato.gravitino.integration.test.web.ui.utils.AbstractWebIT; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; + +@Tag("gravitino-docker-it") +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class CatalogsPageKafkaTest extends AbstractWebIT { + MetalakePage metalakePage = new MetalakePage(); + CatalogsPage catalogsPage = new CatalogsPage(); + + private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); + protected static GravitinoAdminClient gravitinoClient; + private static GravitinoMetalake metalake; + + protected static String gravitinoUri = "http://127.0.0.1:8090"; + protected static String kafkaUri = "http://127.0.0.1:9092"; + + private static final String CATALOG_TABLE_TITLE = "Schemas"; + private static final String SCHEMA_TOPIC_TITLE = "Topics"; + private static final String METALAKE_NAME = "test"; + private static final String CATALOG_TYPE_MESSAGING = "messaging"; + private static final String HIVE_CATALOG_NAME = "catalog_hive"; + private static final String MODIFIED_HIVE_CATALOG_NAME = HIVE_CATALOG_NAME + "_edited"; + private static final String ICEBERG_CATALOG_NAME = "catalog_iceberg"; + private static final String FILESET_CATALOG_NAME = "catalog_fileset"; + private static final String KAFKA_CATALOG_NAME = "catalog_kafka"; + private static final String SCHEMA_NAME = "default"; + private static final String TOPIC_NAME = "topic1"; + + private static final String MYSQL_CATALOG_NAME = "catalog_mysql"; + + private static final String PG_CATALOG_NAME = "catalog_pg"; + + @BeforeAll + public static void before() throws Exception { + gravitinoClient = AbstractIT.getGravitinoClient(); + + gravitinoUri = String.format("http://127.0.0.1:%d", AbstractIT.getGravitinoServerPort()); + + containerSuite.startKafkaContainer(); + + String address = containerSuite.getKafkaContainer().getContainerIpAddress(); + kafkaUri = String.format("%s:%s", address, "9092"); + } + + /** + * Creates a Kafka topic within the specified Metalake, Catalog, Schema, and Topic names. + * + * @param metalakeName The name of the Metalake. + * @param catalogName The name of the Catalog. + * @param schemaName The name of the Schema. + * @param topicName The name of the Kafka topic. 
+ */ + void createTopic(String metalakeName, String catalogName, String schemaName, String topicName) { + Catalog catalog_kafka = + metalake.loadCatalog(NameIdentifier.ofCatalog(metalakeName, catalogName)); + catalog_kafka + .asTopicCatalog() + .createTopic( + NameIdentifier.of(metalakeName, catalogName, schemaName, topicName), + "comment", + null, + Collections.emptyMap()); + } + + /** + * Drops a Kafka topic from the specified Metalake, Catalog, and Schema. + * + * @param metalakeName The name of the Metalake where the topic resides. + * @param catalogName The name of the Catalog that contains the topic. + * @param schemaName The name of the Schema under which the topic exists. + * @param topicName The name of the Kafka topic to be dropped. + */ + void dropTopic(String metalakeName, String catalogName, String schemaName, String topicName) { + Catalog catalog_kafka = + metalake.loadCatalog(NameIdentifier.ofCatalog(metalakeName, catalogName)); + catalog_kafka + .asTopicCatalog() + .dropTopic(NameIdentifier.of(metalakeName, catalogName, schemaName, topicName)); + } + + @Test + @Order(0) + public void testCreateKafkaCatalog() throws InterruptedException { + // create metalake + clickAndWait(metalakePage.createMetalakeBtn); + metalakePage.setMetalakeNameField(METALAKE_NAME); + clickAndWait(metalakePage.submitHandleMetalakeBtn); + // load metalake + metalake = gravitinoClient.loadMetalake(NameIdentifier.of(METALAKE_NAME)); + metalakePage.clickMetalakeLink(METALAKE_NAME); + // create kafka catalog actions + clickAndWait(catalogsPage.createCatalogBtn); + catalogsPage.setCatalogNameField(KAFKA_CATALOG_NAME); + clickAndWait(catalogsPage.catalogTypeSelector); + catalogsPage.clickSelectType("messaging"); + catalogsPage.setCatalogCommentField("kafka catalog comment"); + // set kafka catalog props + catalogsPage.setCatalogFixedProp("bootstrap.servers", kafkaUri); + clickAndWait(catalogsPage.handleSubmitCatalogBtn); + Assertions.assertTrue(catalogsPage.verifyGetCatalog(KAFKA_CATALOG_NAME)); + } + + @Test + @Order(1) + public void testKafkaSchemaTreeNode() throws InterruptedException { + // click kafka catalog tree node + String kafkaCatalogNode = + String.format( + "{{%s}}{{%s}}{{%s}}", METALAKE_NAME, KAFKA_CATALOG_NAME, CATALOG_TYPE_MESSAGING); + catalogsPage.clickTreeNode(kafkaCatalogNode); + // verify show table title、 schema name and tree node + Assertions.assertTrue(catalogsPage.verifyShowTableTitle(CATALOG_TABLE_TITLE)); + Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(SCHEMA_NAME, false)); + List treeNodes = + Arrays.asList( + MODIFIED_HIVE_CATALOG_NAME, + ICEBERG_CATALOG_NAME, + MYSQL_CATALOG_NAME, + PG_CATALOG_NAME, + FILESET_CATALOG_NAME, + KAFKA_CATALOG_NAME, + SCHEMA_NAME); + Assertions.assertTrue(catalogsPage.verifyTreeNodes(treeNodes)); + } + + @Test + @Order(2) + public void testKafkaTopicTreeNode() throws InterruptedException { + // 1. create topic of kafka catalog + createTopic(METALAKE_NAME, KAFKA_CATALOG_NAME, SCHEMA_NAME, TOPIC_NAME); + // 2. click schema tree node + String kafkaSchemaNode = + String.format( + "{{%s}}{{%s}}{{%s}}{{%s}}", + METALAKE_NAME, KAFKA_CATALOG_NAME, CATALOG_TYPE_MESSAGING, SCHEMA_NAME); + catalogsPage.clickTreeNode(kafkaSchemaNode); + // 3. 
verify show table title、 default schema name and tree node + Assertions.assertTrue(catalogsPage.verifyShowTableTitle(SCHEMA_TOPIC_TITLE)); + Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(TOPIC_NAME, false)); + List treeNodes = + Arrays.asList( + MODIFIED_HIVE_CATALOG_NAME, + ICEBERG_CATALOG_NAME, + MYSQL_CATALOG_NAME, + PG_CATALOG_NAME, + FILESET_CATALOG_NAME, + KAFKA_CATALOG_NAME, + SCHEMA_NAME, + TOPIC_NAME); + Assertions.assertTrue(catalogsPage.verifyTreeNodes(treeNodes)); + } + + @Test + @Order(3) + public void testKafkaTopicDetail() throws InterruptedException { + // 1. click topic tree node + String topicNode = + String.format( + "{{%s}}{{%s}}{{%s}}{{%s}}{{%s}}", + METALAKE_NAME, KAFKA_CATALOG_NAME, CATALOG_TYPE_MESSAGING, SCHEMA_NAME, TOPIC_NAME); + catalogsPage.clickTreeNode(topicNode); + // 2. verify show tab details + Assertions.assertTrue(catalogsPage.verifyShowDetailsContent()); + // 3. verify show highlight properties + Assertions.assertTrue( + catalogsPage.verifyShowPropertiesItemInList( + "key", "partition-count", "partition-count", true)); + Assertions.assertTrue( + catalogsPage.verifyShowPropertiesItemInList("value", "partition-count", "1", true)); + Assertions.assertTrue( + catalogsPage.verifyShowPropertiesItemInList( + "key", "replication-factor", "replication-factor", true)); + Assertions.assertTrue( + catalogsPage.verifyShowPropertiesItemInList("value", "replication-factor", "1", true)); + } + + @Test + @Order(4) + public void testDropKafkaTopic() throws InterruptedException { + // delete topic of kafka catalog + dropTopic(METALAKE_NAME, KAFKA_CATALOG_NAME, SCHEMA_NAME, TOPIC_NAME); + // click schema tree node + String kafkaSchemaNode = + String.format( + "{{%s}}{{%s}}{{%s}}{{%s}}", + METALAKE_NAME, KAFKA_CATALOG_NAME, CATALOG_TYPE_MESSAGING, SCHEMA_NAME); + catalogsPage.clickTreeNode(kafkaSchemaNode); + // verify empty topic list + Assertions.assertTrue(catalogsPage.verifyEmptyTableData()); + } + + @Test + @Order(5) + public void testBackHomePage() throws InterruptedException { + clickAndWait(catalogsPage.backHomeBtn); + Assertions.assertTrue(catalogsPage.verifyBackHomePage()); + } +} diff --git a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/CatalogsPageTest.java b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/CatalogsPageTest.java index b31631030ba..752d85d7074 100644 --- a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/CatalogsPageTest.java +++ b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/CatalogsPageTest.java @@ -9,6 +9,7 @@ import com.datastrato.gravitino.NameIdentifier; import com.datastrato.gravitino.client.GravitinoAdminClient; import com.datastrato.gravitino.client.GravitinoMetalake; +import com.datastrato.gravitino.file.Fileset; import com.datastrato.gravitino.integration.test.container.ContainerSuite; import com.datastrato.gravitino.integration.test.container.TrinoITContainers; import com.datastrato.gravitino.integration.test.util.AbstractIT; @@ -47,26 +48,30 @@ public class CatalogsPageTest extends AbstractWebIT { protected static String hdfsUri = "hdfs://127.0.0.1:9000"; protected static String mysqlUri = "jdbc:mysql://127.0.0.1"; protected static String postgresqlUri = "jdbc:postgresql://127.0.0.1"; - protected static String kafkaUri = "http://127.0.0.1:9092"; private static final String WEB_TITLE = "Gravitino"; private static final String CATALOG_TABLE_TITLE = "Schemas"; private static final String 
SCHEMA_TABLE_TITLE = "Tables"; + private static final String SCHEMA_FILESET_TITLE = "Filesets"; private static final String TABLE_TABLE_TITLE = "Columns"; private static final String METALAKE_NAME = "test"; private static final String METALAKE_SELECT_NAME = "metalake_select_name"; - private static final String CATALOG_TYPE = "relational"; + private static final String CATALOG_TYPE_RELATIONAL = "relational"; + private static final String CATALOG_TYPE_FILESET = "fileset"; private static final String DEFAULT_CATALOG_NAME = "default_catalog"; private static final String HIVE_CATALOG_NAME = "catalog_hive"; - private static final String MODIFIED_CATALOG_NAME = HIVE_CATALOG_NAME + "_edited"; + private static final String MODIFIED_HIVE_CATALOG_NAME = HIVE_CATALOG_NAME + "_edited"; private static final String ICEBERG_CATALOG_NAME = "catalog_iceberg"; private static final String FILESET_CATALOG_NAME = "catalog_fileset"; - private static final String KAFKA_CATALOG_NAME = "catalog_kafka"; private static final String SCHEMA_NAME = "default"; + private static final String SCHEMA_NAME_FILESET = "schema_fileset"; + private static final String FILESET_NAME = "fileset1"; private static final String TABLE_NAME = "table1"; private static final String TABLE_NAME_2 = "table2"; private static final String COLUMN_NAME = "column"; private static final String COLUMN_NAME_2 = "column_2"; + private static final String PROPERTIES_KEY1 = "key1"; + private static final String PROPERTIES_VALUE1 = "val1"; private static final String MYSQL_CATALOG_NAME = "catalog_mysql"; private static final String MYSQL_JDBC_DRIVER = "com.mysql.cj.jdbc.Driver"; @@ -78,6 +83,8 @@ public class CatalogsPageTest extends AbstractWebIT { private static final String COMMON_JDBC_USER = "trino"; private static final String COMMON_JDBC_PWD = "ds123"; + private static String defaultBaseLocation; + @BeforeAll public static void before() throws Exception { gravitinoClient = AbstractIT.getGravitinoClient(); @@ -94,6 +101,31 @@ public static void before() throws Exception { postgresqlUri = trinoITContainers.getPostgresqlUri(); } + /** + * Create the specified schema + * + * @param metalakeName The name of the Metalake where the schema will be created. + * @param catalogName The name of the Catalog where the schema will be created. + * @param schemaName The name of the Schema where the schema will be created. + */ + void createSchema(String metalakeName, String catalogName, String schemaName) { + Map properties = Maps.newHashMap(); + properties.put(PROPERTIES_KEY1, PROPERTIES_VALUE1); + catalog + .asSchemas() + .createSchema( + NameIdentifier.of(metalakeName, catalogName, schemaName), "comment", properties); + } + + /** + * Creates a table with a single column in the specified Metalake, Catalog, Schema, and Table. + * + * @param metalakeName The name of the Metalake where the table will be created. + * @param catalogName The name of the Catalog where the table will be created. + * @param schemaName The name of the Schema where the table will be created. + * @param tableName The name of the Table to be created. + * @param colName The name of the Column to be created in the Table. + */ void createTableAndColumn( String metalakeName, String catalogName, @@ -111,6 +143,56 @@ void createTableAndColumn( properties); } + /** + * Retrieves the default base location for the given schema name. + * + * @param schemaName The name of the schema. + * @return The default HDFS storage location for the schema. 
+ */ + private static String defaultBaseLocation(String schemaName) { + if (defaultBaseLocation == null) { + defaultBaseLocation = + String.format("%s/user/hadoop/%s.db", hdfsUri, schemaName.toLowerCase()); + } + return defaultBaseLocation; + } + + /** + * Retrieves the storage location for the given schema name and fileset name. + * + * @param schemaName The name of the schema. + * @param filesetName The name of the fileset. + * @return The storage path for the combination of schema and fileset. + */ + private static String storageLocation(String schemaName, String filesetName) { + return defaultBaseLocation(schemaName) + "/" + filesetName; + } + + /** + * Creates a fileset within the specified Metalake, Catalog, Schema, and Fileset names. + * + * @param metalakeName The name of the Metalake. + * @param catalogName The name of the Catalog. + * @param schemaName The name of the Schema. + * @param filesetName The name of the Fileset. + */ + void createFileset( + String metalakeName, String catalogName, String schemaName, String filesetName) { + Map properties = Maps.newHashMap(); + properties.put(PROPERTIES_KEY1, PROPERTIES_VALUE1); + String storageLocation = storageLocation(schemaName, filesetName); + Catalog catalog_fileset = + metalake.loadCatalog(NameIdentifier.ofCatalog(metalakeName, catalogName)); + catalog_fileset + .asFilesetCatalog() + .createFileset( + NameIdentifier.of(metalakeName, catalogName, schemaName, filesetName), + "comment", + Fileset.Type.MANAGED, + storageLocation, + properties); + } + @AfterAll public static void after() { try { @@ -142,7 +224,7 @@ public void testDeleteCatalog() throws InterruptedException { // delete catalog catalogsPage.clickDeleteCatalogBtn(DEFAULT_CATALOG_NAME); clickAndWait(catalogsPage.confirmDeleteBtn); - Assertions.assertTrue(catalogsPage.verifyEmptyCatalog()); + Assertions.assertTrue(catalogsPage.verifyEmptyTableData()); } @Test @@ -235,20 +317,6 @@ public void testCreateFilesetCatalog() throws InterruptedException { @Test @Order(6) - public void testCreateKafkaCatalog() throws InterruptedException { - clickAndWait(catalogsPage.createCatalogBtn); - catalogsPage.setCatalogNameField(KAFKA_CATALOG_NAME); - clickAndWait(catalogsPage.catalogTypeSelector); - catalogsPage.clickSelectType("messaging"); - catalogsPage.setCatalogCommentField("kafka catalog comment"); - // set kafka catalog props - catalogsPage.setCatalogFixedProp("bootstrap.servers", kafkaUri); - clickAndWait(catalogsPage.handleSubmitCatalogBtn); - Assertions.assertTrue(catalogsPage.verifyGetCatalog(KAFKA_CATALOG_NAME)); - } - - @Test - @Order(7) public void testRefreshPage() { driver.navigate().refresh(); Assertions.assertEquals(WEB_TITLE, driver.getTitle()); @@ -259,13 +327,12 @@ public void testRefreshPage() { ICEBERG_CATALOG_NAME, MYSQL_CATALOG_NAME, PG_CATALOG_NAME, - FILESET_CATALOG_NAME, - KAFKA_CATALOG_NAME); + FILESET_CATALOG_NAME); Assertions.assertTrue(catalogsPage.verifyCreatedCatalogs(catalogsNames)); } @Test - @Order(8) + @Order(7) public void testViewTabMetalakeDetails() throws InterruptedException { clickAndWait(catalogsPage.tabDetailsBtn); Assertions.assertTrue(catalogsPage.verifyShowDetailsContent()); @@ -274,7 +341,7 @@ public void testViewTabMetalakeDetails() throws InterruptedException { } @Test - @Order(9) + @Order(8) public void testViewCatalogDetails() throws InterruptedException { catalogsPage.clickViewCatalogBtn(HIVE_CATALOG_NAME); Assertions.assertTrue( @@ -282,26 +349,27 @@ public void testViewCatalogDetails() throws InterruptedException { } @Test - 
@Order(10) - public void testEditCatalog() throws InterruptedException { + @Order(9) + public void testEditHiveCatalog() throws InterruptedException { catalogsPage.clickEditCatalogBtn(HIVE_CATALOG_NAME); - catalogsPage.setCatalogNameField(MODIFIED_CATALOG_NAME); + catalogsPage.setCatalogNameField(MODIFIED_HIVE_CATALOG_NAME); clickAndWait(catalogsPage.handleSubmitCatalogBtn); - Assertions.assertTrue(catalogsPage.verifyEditedCatalog(MODIFIED_CATALOG_NAME)); + Assertions.assertTrue(catalogsPage.verifyEditedCatalog(MODIFIED_HIVE_CATALOG_NAME)); } // test catalog show schema list @Test - @Order(11) + @Order(10) public void testClickCatalogLink() { - catalogsPage.clickCatalogLink(METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE); + catalogsPage.clickCatalogLink( + METALAKE_NAME, MODIFIED_HIVE_CATALOG_NAME, CATALOG_TYPE_RELATIONAL); Assertions.assertTrue(catalogsPage.verifyShowTableTitle(CATALOG_TABLE_TITLE)); Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(SCHEMA_NAME, false)); - Assertions.assertTrue(catalogsPage.verifySelectedNode(MODIFIED_CATALOG_NAME)); + Assertions.assertTrue(catalogsPage.verifySelectedNode(MODIFIED_HIVE_CATALOG_NAME)); } @Test - @Order(12) + @Order(11) public void testRefreshCatalogPage() { driver.navigate().refresh(); Assertions.assertEquals(driver.getTitle(), WEB_TITLE); @@ -309,32 +377,32 @@ public void testRefreshCatalogPage() { Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(SCHEMA_NAME, false)); List treeNodes = Arrays.asList( - MODIFIED_CATALOG_NAME, + MODIFIED_HIVE_CATALOG_NAME, SCHEMA_NAME, ICEBERG_CATALOG_NAME, MYSQL_CATALOG_NAME, PG_CATALOG_NAME, - FILESET_CATALOG_NAME, - KAFKA_CATALOG_NAME); + FILESET_CATALOG_NAME); Assertions.assertTrue(catalogsPage.verifyTreeNodes(treeNodes)); - Assertions.assertTrue(catalogsPage.verifySelectedNode(MODIFIED_CATALOG_NAME)); + Assertions.assertTrue(catalogsPage.verifySelectedNode(MODIFIED_HIVE_CATALOG_NAME)); } // test schema show table list @Test - @Order(13) + @Order(12) public void testClickSchemaLink() { // create table createTableAndColumn( - METALAKE_NAME, MODIFIED_CATALOG_NAME, SCHEMA_NAME, TABLE_NAME, COLUMN_NAME); - catalogsPage.clickSchemaLink(METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE, SCHEMA_NAME); + METALAKE_NAME, MODIFIED_HIVE_CATALOG_NAME, SCHEMA_NAME, TABLE_NAME, COLUMN_NAME); + catalogsPage.clickSchemaLink( + METALAKE_NAME, MODIFIED_HIVE_CATALOG_NAME, CATALOG_TYPE_RELATIONAL, SCHEMA_NAME); Assertions.assertTrue(catalogsPage.verifyShowTableTitle(SCHEMA_TABLE_TITLE)); Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(TABLE_NAME, false)); Assertions.assertTrue(catalogsPage.verifySelectedNode(SCHEMA_NAME)); } @Test - @Order(14) + @Order(13) public void testRefreshSchemaPage() { driver.navigate().refresh(); Assertions.assertEquals(driver.getTitle(), WEB_TITLE); @@ -342,24 +410,27 @@ public void testRefreshSchemaPage() { Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(TABLE_NAME, false)); List treeNodes = Arrays.asList( - MODIFIED_CATALOG_NAME, + MODIFIED_HIVE_CATALOG_NAME, SCHEMA_NAME, TABLE_NAME, ICEBERG_CATALOG_NAME, MYSQL_CATALOG_NAME, PG_CATALOG_NAME, - FILESET_CATALOG_NAME, - KAFKA_CATALOG_NAME); + FILESET_CATALOG_NAME); Assertions.assertTrue(catalogsPage.verifyTreeNodes(treeNodes)); Assertions.assertTrue(catalogsPage.verifySelectedNode(SCHEMA_NAME)); } // test table show column list @Test - @Order(15) + @Order(14) public void testClickTableLink() { catalogsPage.clickTableLink( - METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE, SCHEMA_NAME, TABLE_NAME); 
+ METALAKE_NAME, + MODIFIED_HIVE_CATALOG_NAME, + CATALOG_TYPE_RELATIONAL, + SCHEMA_NAME, + TABLE_NAME); Assertions.assertTrue(catalogsPage.verifyShowTableTitle(TABLE_TABLE_TITLE)); Assertions.assertTrue(catalogsPage.verifyTableColumns()); Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(COLUMN_NAME, true)); @@ -367,7 +438,7 @@ public void testClickTableLink() { } @Test - @Order(16) + @Order(15) public void testRefreshTablePage() { driver.navigate().refresh(); Assertions.assertEquals(driver.getTitle(), WEB_TITLE); @@ -377,62 +448,41 @@ public void testRefreshTablePage() { Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(COLUMN_NAME, true)); List treeNodes = Arrays.asList( - MODIFIED_CATALOG_NAME, + MODIFIED_HIVE_CATALOG_NAME, SCHEMA_NAME, TABLE_NAME, ICEBERG_CATALOG_NAME, MYSQL_CATALOG_NAME, PG_CATALOG_NAME, - FILESET_CATALOG_NAME, - KAFKA_CATALOG_NAME); + FILESET_CATALOG_NAME); Assertions.assertTrue(catalogsPage.verifyTreeNodes(treeNodes)); } @Test - @Order(17) - public void testSelectMetalake() throws InterruptedException { - catalogsPage.metalakeSelectChange(METALAKE_SELECT_NAME); - Assertions.assertTrue(catalogsPage.verifyEmptyCatalog()); - - catalogsPage.metalakeSelectChange(METALAKE_NAME); - Assertions.assertTrue(catalogsPage.verifyGetCatalog(MODIFIED_CATALOG_NAME)); - } - - @Test - @Order(18) - public void testClickTreeList() throws InterruptedException { - String icebergNode = - String.format("{{%s}}{{%s}}{{%s}}", METALAKE_NAME, ICEBERG_CATALOG_NAME, CATALOG_TYPE); - catalogsPage.clickTreeNode(icebergNode); - Assertions.assertTrue(catalogsPage.verifyGetCatalog(ICEBERG_CATALOG_NAME)); - String mysqlNode = - String.format("{{%s}}{{%s}}{{%s}}", METALAKE_NAME, MYSQL_CATALOG_NAME, CATALOG_TYPE); - catalogsPage.clickTreeNode(mysqlNode); - Assertions.assertTrue(catalogsPage.verifyGetCatalog(MYSQL_CATALOG_NAME)); - String pgNode = - String.format("{{%s}}{{%s}}{{%s}}", METALAKE_NAME, PG_CATALOG_NAME, CATALOG_TYPE); - catalogsPage.clickTreeNode(pgNode); - Assertions.assertTrue(catalogsPage.verifyGetCatalog(PG_CATALOG_NAME)); - String filesetNode = - String.format("{{%s}}{{%s}}{{%s}}", METALAKE_NAME, FILESET_CATALOG_NAME, "fileset"); - catalogsPage.clickTreeNode(filesetNode); - Assertions.assertTrue(catalogsPage.verifyGetCatalog(FILESET_CATALOG_NAME)); + @Order(16) + public void testRelationalHiveCatalogTreeNode() throws InterruptedException { String hiveNode = - String.format("{{%s}}{{%s}}{{%s}}", METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE); + String.format( + "{{%s}}{{%s}}{{%s}}", + METALAKE_NAME, MODIFIED_HIVE_CATALOG_NAME, CATALOG_TYPE_RELATIONAL); catalogsPage.clickTreeNode(hiveNode); Assertions.assertTrue(catalogsPage.verifyShowTableTitle(CATALOG_TABLE_TITLE)); - Assertions.assertTrue(catalogsPage.verifyGetCatalog(MODIFIED_CATALOG_NAME)); + Assertions.assertTrue(catalogsPage.verifyGetCatalog(MODIFIED_HIVE_CATALOG_NAME)); String schemaNode = String.format( "{{%s}}{{%s}}{{%s}}{{%s}}", - METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE, SCHEMA_NAME); + METALAKE_NAME, MODIFIED_HIVE_CATALOG_NAME, CATALOG_TYPE_RELATIONAL, SCHEMA_NAME); catalogsPage.clickTreeNode(schemaNode); Assertions.assertTrue(catalogsPage.verifyShowTableTitle(SCHEMA_TABLE_TITLE)); Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(TABLE_NAME, false)); String tableNode = String.format( "{{%s}}{{%s}}{{%s}}{{%s}}{{%s}}", - METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE, SCHEMA_NAME, TABLE_NAME); + METALAKE_NAME, + MODIFIED_HIVE_CATALOG_NAME, + CATALOG_TYPE_RELATIONAL, + 
SCHEMA_NAME, + TABLE_NAME); catalogsPage.clickTreeNode(tableNode); Assertions.assertTrue(catalogsPage.verifyShowTableTitle(TABLE_TABLE_TITLE)); Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(COLUMN_NAME, true)); @@ -440,30 +490,128 @@ public void testClickTreeList() throws InterruptedException { } @Test - @Order(19) + @Order(17) public void testTreeNodeRefresh() throws InterruptedException { createTableAndColumn( - METALAKE_NAME, MODIFIED_CATALOG_NAME, SCHEMA_NAME, TABLE_NAME_2, COLUMN_NAME_2); + METALAKE_NAME, MODIFIED_HIVE_CATALOG_NAME, SCHEMA_NAME, TABLE_NAME_2, COLUMN_NAME_2); String hiveNode = - String.format("{{%s}}{{%s}}{{%s}}", METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE); + String.format( + "{{%s}}{{%s}}{{%s}}", + METALAKE_NAME, MODIFIED_HIVE_CATALOG_NAME, CATALOG_TYPE_RELATIONAL); catalogsPage.clickTreeNode(hiveNode); String schemaNode = String.format( "{{%s}}{{%s}}{{%s}}{{%s}}", - METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE, SCHEMA_NAME); + METALAKE_NAME, MODIFIED_HIVE_CATALOG_NAME, CATALOG_TYPE_RELATIONAL, SCHEMA_NAME); catalogsPage.clickTreeNodeRefresh(schemaNode); String tableNode = String.format( "{{%s}}{{%s}}{{%s}}{{%s}}{{%s}}", - METALAKE_NAME, MODIFIED_CATALOG_NAME, CATALOG_TYPE, SCHEMA_NAME, TABLE_NAME_2); + METALAKE_NAME, + MODIFIED_HIVE_CATALOG_NAME, + CATALOG_TYPE_RELATIONAL, + SCHEMA_NAME, + TABLE_NAME_2); catalogsPage.clickTreeNode(tableNode); Assertions.assertTrue(catalogsPage.verifyShowTableTitle(TABLE_TABLE_TITLE)); Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(COLUMN_NAME_2, true)); Assertions.assertTrue(catalogsPage.verifyTableColumns()); } + @Test + @Order(18) + public void testOtherRelationaCatalogTreeNode() throws InterruptedException { + String icebergNode = + String.format( + "{{%s}}{{%s}}{{%s}}", METALAKE_NAME, ICEBERG_CATALOG_NAME, CATALOG_TYPE_RELATIONAL); + catalogsPage.clickTreeNode(icebergNode); + Assertions.assertTrue(catalogsPage.verifyGetCatalog(ICEBERG_CATALOG_NAME)); + String mysqlNode = + String.format( + "{{%s}}{{%s}}{{%s}}", METALAKE_NAME, MYSQL_CATALOG_NAME, CATALOG_TYPE_RELATIONAL); + catalogsPage.clickTreeNode(mysqlNode); + Assertions.assertTrue(catalogsPage.verifyGetCatalog(MYSQL_CATALOG_NAME)); + String pgNode = + String.format( + "{{%s}}{{%s}}{{%s}}", METALAKE_NAME, PG_CATALOG_NAME, CATALOG_TYPE_RELATIONAL); + catalogsPage.clickTreeNode(pgNode); + Assertions.assertTrue(catalogsPage.verifyGetCatalog(PG_CATALOG_NAME)); + } + + @Test + @Order(19) + public void testSelectMetalake() throws InterruptedException { + catalogsPage.metalakeSelectChange(METALAKE_SELECT_NAME); + Assertions.assertTrue(catalogsPage.verifyEmptyTableData()); + + catalogsPage.metalakeSelectChange(METALAKE_NAME); + driver.navigate().refresh(); + } + @Test @Order(20) + public void testFilesetCatalogTreeNode() throws InterruptedException { + // 1. create schema and fileset of fileset catalog + createSchema(METALAKE_NAME, FILESET_CATALOG_NAME, SCHEMA_NAME_FILESET); + createFileset(METALAKE_NAME, FILESET_CATALOG_NAME, SCHEMA_NAME_FILESET, FILESET_NAME); + // 2. click fileset catalog tree node + String filesetCatalogNode = + String.format( + "{{%s}}{{%s}}{{%s}}", METALAKE_NAME, FILESET_CATALOG_NAME, CATALOG_TYPE_FILESET); + catalogsPage.clickTreeNode(filesetCatalogNode); + // 3. 
verify show table title、 schema name and tree node + Assertions.assertTrue(catalogsPage.verifyShowTableTitle(CATALOG_TABLE_TITLE)); + Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(SCHEMA_NAME_FILESET, false)); + List treeNodes = + Arrays.asList( + MODIFIED_HIVE_CATALOG_NAME, + ICEBERG_CATALOG_NAME, + MYSQL_CATALOG_NAME, + PG_CATALOG_NAME, + FILESET_CATALOG_NAME, + SCHEMA_NAME_FILESET); + Assertions.assertTrue(catalogsPage.verifyTreeNodes(treeNodes)); + // 4. click schema tree node + String filesetSchemaNode = + String.format( + "{{%s}}{{%s}}{{%s}}{{%s}}", + METALAKE_NAME, FILESET_CATALOG_NAME, CATALOG_TYPE_FILESET, SCHEMA_NAME_FILESET); + catalogsPage.clickTreeNode(filesetSchemaNode); + // 5. verify show table title、 fileset name and tree node + Assertions.assertTrue(catalogsPage.verifyShowTableTitle(SCHEMA_FILESET_TITLE)); + Assertions.assertTrue(catalogsPage.verifyShowDataItemInList(FILESET_NAME, false)); + treeNodes = + Arrays.asList( + MODIFIED_HIVE_CATALOG_NAME, + ICEBERG_CATALOG_NAME, + MYSQL_CATALOG_NAME, + PG_CATALOG_NAME, + FILESET_CATALOG_NAME, + SCHEMA_NAME_FILESET, + FILESET_NAME); + Assertions.assertTrue(catalogsPage.verifyTreeNodes(treeNodes)); + // 6. click fileset tree node + String filesetNode = + String.format( + "{{%s}}{{%s}}{{%s}}{{%s}}{{%s}}", + METALAKE_NAME, + FILESET_CATALOG_NAME, + CATALOG_TYPE_FILESET, + SCHEMA_NAME_FILESET, + FILESET_NAME); + catalogsPage.clickTreeNode(filesetNode); + // 7. verify show tab details + Assertions.assertTrue(catalogsPage.verifyShowDetailsContent()); + Assertions.assertTrue( + catalogsPage.verifyShowPropertiesItemInList( + "key", PROPERTIES_KEY1, PROPERTIES_KEY1, false)); + Assertions.assertTrue( + catalogsPage.verifyShowPropertiesItemInList( + "value", PROPERTIES_KEY1, PROPERTIES_VALUE1, false)); + } + + @Test + @Order(21) public void testBackHomePage() throws InterruptedException { clickAndWait(catalogsPage.backHomeBtn); Assertions.assertTrue(catalogsPage.verifyBackHomePage()); diff --git a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/pages/CatalogsPage.java b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/pages/CatalogsPage.java index 8f8c1a73a20..a2508fb0a50 100644 --- a/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/pages/CatalogsPage.java +++ b/integration-test/src/test/java/com/datastrato/gravitino/integration/test/web/ui/pages/CatalogsPage.java @@ -201,6 +201,18 @@ public void clickDeleteCatalogBtn(String name) { } } + public void clickMetalakeLink(String metalakeName) { + try { + String xpath = "//a[@href='?metalake=" + metalakeName + "']"; + WebElement link = tableGrid.findElement(By.xpath(xpath)); + WebDriverWait wait = new WebDriverWait(driver, MAX_TIMEOUT); + wait.until(ExpectedConditions.elementToBeClickable(By.xpath(xpath))); + clickAndWait(link); + } catch (Exception e) { + LOG.error(e.getMessage(), e); + } + } + public void clickCatalogLink(String metalakeName, String catalogName, String catalogType) { try { String xpath = @@ -415,7 +427,7 @@ public boolean verifyEditedCatalog(String name) { } } - public boolean verifyEmptyCatalog() { + public boolean verifyEmptyTableData() { try { // Check is empty table boolean isNoRows = waitShowText("No rows", tableWrapper); @@ -450,6 +462,40 @@ public boolean verifyShowTableTitle(String title) { } } + /** + * Verifies if a given property item is present in a specified list. + * + * @param item The key or value item of the property. 
+ * @param key The key of the property. + * @param value The value of key item of the property. + * @param isHighlight Whether to highlight the property item or not. + * @return True if the property item is found in the list, false otherwise. + */ + public boolean verifyShowPropertiesItemInList( + String item, String key, String value, Boolean isHighlight) { + try { + Thread.sleep(ACTION_SLEEP_MILLIS); + String xpath; + if (isHighlight) { + xpath = "//div[@data-refer='props-" + item + "-" + key + "-highlight']"; + } else { + xpath = "//div[@data-refer='props-" + item + "-" + key + "']"; + } + WebElement propertyElement = driver.findElement(By.xpath(xpath)); + boolean match = Objects.equals(propertyElement.getText(), value); + + if (!match) { + LOG.error("Prop: does not include itemName: {}", value); + return false; + } + + return true; + } catch (Exception e) { + LOG.error(e.getMessage(), e); + return false; + } + } + public boolean verifyShowDataItemInList(String itemName, Boolean isColumnLevel) { try { Thread.sleep(ACTION_SLEEP_MILLIS); diff --git a/integration-test/src/test/resources/run b/integration-test/src/test/resources/run new file mode 100755 index 00000000000..eae50741495 --- /dev/null +++ b/integration-test/src/test/resources/run @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script is originally from the official Kafka docker image container `/etc/kafka/docker/run` +# and has been modified to be used to modify the Kafka configuration `advertised.listeners` to use the container's IP address +IP=$(hostname -i) +export KAFKA_ADVERTISED_LISTENERS="PLAINTEXT://$IP:$DEFAULT_BROKER_PORT" +echo "KAFKA_ADVERTISED_LISTENERS is set to $KAFKA_ADVERTISED_LISTENERS" + + +. /etc/kafka/docker/bash-config + +# Set environment values if they exist as arguments +if [ $# -ne 0 ]; then + echo "===> Overriding env params with args ..." + for var in "$@" + do + export "$var" + done +fi + +echo "===> User" +id + +echo "===> Setting default values of environment variables if not already set." +. /etc/kafka/docker/configureDefaults + +echo "===> Configuring ..." +. /etc/kafka/docker/configure + +echo "===> Launching ... " +. /etc/kafka/docker/launch \ No newline at end of file diff --git a/web/src/app/metalakes/metalake/rightContent/tabsContent/detailsView/DetailsView.js b/web/src/app/metalakes/metalake/rightContent/tabsContent/detailsView/DetailsView.js index 84728a83cf5..eec3f7fcf18 100644 --- a/web/src/app/metalakes/metalake/rightContent/tabsContent/detailsView/DetailsView.js +++ b/web/src/app/metalakes/metalake/rightContent/tabsContent/detailsView/DetailsView.js @@ -130,7 +130,7 @@ const DetailsView = () => { Properties - + { : 400 }} > - {item.key} +
+                    {item.key}
+
                  `${theme.spacing(2.75)} !important` }}>
@@ -167,7 +175,15 @@ const DetailsView = () => {
                    : 400
                  }}
                >
-                  {item.value}
+
+                    {item.value}
+
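
The DetailsView.js hunk above lost its JSX tags when the diff was flattened, so only the `{item.key}` / `{item.value}` expressions and the bare `+` markers of the added lines survive. Judging from the xpaths built by the new `verifyShowPropertiesItemInList()` helper (`props-key-<key>`, `props-value-<key>`, plus a `-highlight` suffix), the change appears to wrap each property key and value in a div-rendering element that carries a `data-refer` attribute. The sketch below is an illustration under that assumption only — `Box`, `highlightKeys`, and `PropertyRow` are hypothetical names, not the actual hunk content:

import Box from '@mui/material/Box'

// Hypothetical: property keys the UI renders with the highlighted style.
const highlightKeys = ['partition-count', 'replication-factor']

// Minimal sketch of how a property row could expose the data-refer hooks that
// CatalogsPage.verifyShowPropertiesItemInList() locates via XPath, e.g.
// //div[@data-refer='props-value-partition-count-highlight'].
const PropertyRow = ({ item }) => {
  const suffix = highlightKeys.includes(item.key) ? '-highlight' : ''

  return (
    <>
      {/* MUI Box renders a div by default, matching the //div[...] XPath used in the tests */}
      <Box data-refer={`props-key-${item.key}${suffix}`}>{item.key}</Box>
      <Box data-refer={`props-value-${item.key}${suffix}`}>{item.value}</Box>
    </>
  )
}

export default PropertyRow

With markup like this, the Kafka topic test's assertion `verifyShowPropertiesItemInList("value", "partition-count", "1", true)` would resolve to the element whose `data-refer` is `props-value-partition-count-highlight` and compare its text against "1".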