catalog tests #6

Closed · wants to merge 4 commits
16 changes: 7 additions & 9 deletions .github/workflows/backend-integration-test.yml
@@ -62,6 +62,7 @@ jobs:
java-version: [ 8, 11, 17 ]
test-mode: [ embedded, deploy ]
backend: [ jdbcBackend, kvBackend]
+ catalog: [ jdbc-doris, jdbc-mysql, jdbc-postgresql, lakehouse-iceberg, hadoop, hive, kafka ]
env:
PLATFORM: ${{ matrix.architecture }}
steps:
@@ -92,19 +93,16 @@ jobs:
run: |
dev/ci/util_free_space.sh

- - name: Backend Integration Test
- id: integrationTest
- run: >
- ./gradlew test --rerun-tasks -PskipTests -PtestMode=${{ matrix.test-mode }} -PjdkVersion=${{ matrix.java-version }} -P${{ matrix.backend }} -PskipWebITs
- -x :web:test -x :clients:client-python:test -x :flink-connector:test -x :spark-connector:test -x :spark-connector:spark-common:test
- -x :spark-connector:spark-3.3:test -x :spark-connector:spark-3.4:test -x :spark-connector:spark-3.5:test
- -x :spark-connector:spark-runtime-3.3:test -x :spark-connector:spark-runtime-3.4:test -x :spark-connector:spark-runtime-3.5:test
+ - name: catalog test
+ id: catalogTest
+ run: |
+ ./gradlew :catalogs:catalog-${{ matrix.catalog }}:test -PskipTests -PtestMode=${{ matrix.test-mode }} -PjdkVersion=${{ matrix.java-version }} -P${{ matrix.backend }}

- name: Upload integrate tests reports
uses: actions/upload-artifact@v3
- if: ${{ (failure() && steps.integrationTest.outcome == 'failure') || contains(github.event.pull_request.labels.*.name, 'upload log') }}
+ if: ${{ (failure() && steps.catalogTest.outcome == 'failure') || contains(github.event.pull_request.labels.*.name, 'upload log') }}
with:
- name: integrate-test-reports-${{ matrix.java-version }}-${{ matrix.test-mode }}-${{ matrix.backend }}
+ name: integrate-test-reports-${{ matrix.catalog }}-${{ matrix.java-version }}-${{ matrix.test-mode }}-${{ matrix.backend }}
path: |
build/reports
integration-test/build/*.log
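The step change above replaces the single monolithic backend integration test run with a per-catalog Gradle test task driven by the new catalog matrix axis. An equivalent local run, choosing one value from each axis purely for illustration, would be `./gradlew :catalogs:catalog-hive:test -PskipTests -PtestMode=embedded -PjdkVersion=17 -PjdbcBackend`.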
SupportsCatalogs.java
@@ -18,12 +18,12 @@
public interface SupportsCatalogs {

/**
- * List all catalogs in the metalake.
+ * List the names of all catalogs in the metalake.
*
- * @return The list of catalog's name identifiers.
- * @throws NoSuchMetalakeException If the metalake with namespace does not exist.
+ * @return The list of catalog names.
+ * @throws NoSuchMetalakeException If the metalake does not exist.
*/
- NameIdentifier[] listCatalogs() throws NoSuchMetalakeException;
+ String[] listCatalogs() throws NoSuchMetalakeException;

/**
* List all catalogs with their information in the metalake.
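A minimal sketch of a caller adapting to the signature change above; how the SupportsCatalogs instance is obtained is an assumption, not part of this PR:

// Hypothetical caller: listCatalogs() now returns plain names (String[])
// rather than NameIdentifier[].
static void printCatalogNames(SupportsCatalogs metalake) {
  String[] catalogNames = metalake.listCatalogs();
  for (String name : catalogNames) {
    System.out.println(name);
  }
}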
HiveCatalogOperations.java
@@ -6,19 +6,22 @@

import static com.datastrato.gravitino.catalog.hive.HiveCatalogPropertiesMeta.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS;
import static com.datastrato.gravitino.catalog.hive.HiveCatalogPropertiesMeta.CLIENT_POOL_SIZE;
+ import static com.datastrato.gravitino.catalog.hive.HiveCatalogPropertiesMeta.LIST_ALL_TABLES;
import static com.datastrato.gravitino.catalog.hive.HiveCatalogPropertiesMeta.METASTORE_URIS;
import static com.datastrato.gravitino.catalog.hive.HiveCatalogPropertiesMeta.PRINCIPAL;
+ import static com.datastrato.gravitino.catalog.hive.HiveTable.ICEBERG_TABLE_TYPE_VALUE;
import static com.datastrato.gravitino.catalog.hive.HiveTable.SUPPORT_TABLE_TYPES;
+ import static com.datastrato.gravitino.catalog.hive.HiveTable.TABLE_TYPE_PROP;
import static com.datastrato.gravitino.catalog.hive.HiveTablePropertiesMetadata.COMMENT;
import static com.datastrato.gravitino.catalog.hive.HiveTablePropertiesMetadata.TABLE_TYPE;
+ import static com.datastrato.gravitino.catalog.hive.converter.HiveDataTypeConverter.CONVERTER;
import static com.datastrato.gravitino.connector.BaseCatalog.CATALOG_BYPASS_PREFIX;
import static org.apache.hadoop.hive.metastore.TableType.EXTERNAL_TABLE;

import com.datastrato.gravitino.NameIdentifier;
import com.datastrato.gravitino.Namespace;
import com.datastrato.gravitino.SchemaChange;
import com.datastrato.gravitino.catalog.hive.HiveTablePropertiesMetadata.TableType;
- import com.datastrato.gravitino.catalog.hive.converter.ToHiveType;
import com.datastrato.gravitino.connector.CatalogInfo;
import com.datastrato.gravitino.connector.CatalogOperations;
import com.datastrato.gravitino.connector.HasPropertyMetadata;
@@ -99,6 +102,7 @@ public class HiveCatalogOperations implements CatalogOperations, SupportsSchemas
private ScheduledThreadPoolExecutor checkTgtExecutor;
private String kerberosRealm;
private ProxyPlugin proxyPlugin;
+ boolean listAllTables = true;

// Map that maintains the mapping of keys in Gravitino to that in Hive, for example, users
// will only need to set the configuration 'METASTORE_URL' in Gravitino and Gravitino will change
@@ -150,6 +154,8 @@ public void initialize(

this.clientPool =
new CachedClientPool(getClientPoolSize(conf), hiveConf, getCacheEvictionInterval(conf));

+ this.listAllTables = enableListAllTables(conf);
}

private void initKerberosIfNecessary(Map<String, String> conf, Configuration hadoopConf) {
@@ -275,6 +281,10 @@ long getCacheEvictionInterval(Map<String, String> conf) {
.getOrDefault(conf, CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS);
}

+ boolean enableListAllTables(Map<String, String> conf) {
+ return (boolean)
+ propertiesMetadata.catalogPropertiesMetadata().getOrDefault(conf, LIST_ALL_TABLES);
+ }
/** Closes the Hive catalog and releases the associated client pool. */
@Override
public void close() {
@@ -534,7 +544,18 @@ public NameIdentifier[] listTables(Namespace namespace) throws NoSuchSchemaException
return clientPool.run(
c ->
c.getTableObjectsByName(schemaIdent.name(), allTables).stream()
- .filter(tb -> SUPPORT_TABLE_TYPES.contains(tb.getTableType()))
+ .filter(
+ tb -> {
+ boolean isSupportTable = SUPPORT_TABLE_TYPES.contains(tb.getTableType());
+ if (!isSupportTable) {
+ return false;
+ }
+ if (!listAllTables) {
+ Map<String, String> parameters = tb.getParameters();
+ return isHiveTable(parameters);
+ }
+ return true;
+ })
.map(tb -> NameIdentifier.of(namespace, tb.getTableName()))
.toArray(NameIdentifier[]::new));
} catch (UnknownDBException e) {
@@ -550,6 +571,22 @@ public NameIdentifier[] listTables(Namespace namespace) throws NoSuchSchemaException
}
}

+ boolean isHiveTable(Map<String, String> tableParameters) {
+ if (isIcebergTable(tableParameters)) return false;
+ return true;
+ }

+ boolean isIcebergTable(Map<String, String> tableParameters) {
+ if (tableParameters != null) {
+ boolean isIcebergTable =
+ ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(tableParameters.get(TABLE_TYPE_PROP));
+ if (isIcebergTable) {
+ return true;
+ }
+ }
+ return false;
+ }

/**
* Loads a table from the Hive Metastore.
*
@@ -946,7 +983,7 @@ private void doAddColumn(List<FieldSchema> cols, TableChange.AddColumn change) {
targetPosition,
new FieldSchema(
change.fieldName()[0],
- ToHiveType.convert(change.getDataType()).getQualifiedName(),
+ CONVERTER.fromGravitino(change.getDataType()).getQualifiedName(),
change.getComment()));
}

@@ -994,7 +1031,8 @@ private void doUpdateColumnType(List<FieldSchema> cols, TableChange.UpdateColumn
if (indexOfColumn == -1) {
throw new IllegalArgumentException("UpdateColumnType does not exist: " + columnName);
}
- cols.get(indexOfColumn).setType(ToHiveType.convert(change.getNewDataType()).getQualifiedName());
+ cols.get(indexOfColumn)
+ .setType(CONVERTER.fromGravitino(change.getNewDataType()).getQualifiedName());
}

/**
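Taken together, the listTables change keeps only supported table types and, unless list-all-tables is enabled, drops Iceberg tables registered in the same Hive Metastore. A standalone sketch of the decision rule, with the constants inlined for illustration:

import java.util.Map;

// Inlined restatement of the filter added above: SUPPORT_TABLE_TYPES is
// {MANAGED_TABLE, EXTERNAL_TABLE}; TABLE_TYPE_PROP is "table_type" and
// ICEBERG_TABLE_TYPE_VALUE is "ICEBERG" (see HiveTable below).
static boolean shouldListTable(
    String tableType, Map<String, String> parameters, boolean listAllTables) {
  boolean supported =
      "MANAGED_TABLE".equals(tableType) || "EXTERNAL_TABLE".equals(tableType);
  if (!supported) {
    return false;
  }
  boolean isIceberg =
      parameters != null && "ICEBERG".equalsIgnoreCase(parameters.get("table_type"));
  return listAllTables || !isIceberg;
}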
HiveCatalogPropertiesMeta.java
@@ -36,6 +36,10 @@ public class HiveCatalogPropertiesMeta extends BaseCatalogPropertiesMetadata {

public static final String FETCH_TIMEOUT_SEC = "kerberos.keytab-fetch-timeout-sec";

+ public static final String LIST_ALL_TABLES = "list-all-tables";

+ public static final boolean DEFAULT_LIST_ALL_TABLES = false;

private static final Map<String, PropertyEntry<?>> HIVE_CATALOG_PROPERTY_ENTRIES =
ImmutableMap.<String, PropertyEntry<?>>builder()
.put(
@@ -88,6 +92,16 @@ public class HiveCatalogPropertiesMeta extends BaseCatalogPropertiesMetadata {
FETCH_TIMEOUT_SEC,
PropertyEntry.integerOptionalPropertyEntry(
FETCH_TIMEOUT_SEC, "The timeout to fetch key tab", true, 60, false))
+ .put(
+ LIST_ALL_TABLES,
+ PropertyEntry.booleanPropertyEntry(
+ LIST_ALL_TABLES,
+ "Whether to list all tables in a database, including non-Hive tables such as Iceberg tables.",
+ false,
+ false,
+ DEFAULT_LIST_ALL_TABLES,
+ false,
+ false))
.putAll(BASIC_CATALOG_PROPERTY_ENTRIES)
.build();
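A hedged sketch of opting in to the new property when configuring a Hive catalog; the map below is illustrative, and the metastore key/value shown alongside it is an assumption:

import com.google.common.collect.ImmutableMap;
import java.util.Map;

// "list-all-tables" (LIST_ALL_TABLES) defaults to false, so Iceberg tables
// stay hidden from listTables() unless a user enables it explicitly.
static final Map<String, String> CATALOG_CONF =
    ImmutableMap.of(
        "metastore.uris", "thrift://localhost:9083", // assumed for illustration
        "list-all-tables", "true");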

HiveTable.java
@@ -16,11 +16,10 @@
import static com.datastrato.gravitino.catalog.hive.HiveTablePropertiesMetadata.TABLE_TYPE;
import static com.datastrato.gravitino.catalog.hive.HiveTablePropertiesMetadata.TableType.EXTERNAL_TABLE;
import static com.datastrato.gravitino.catalog.hive.HiveTablePropertiesMetadata.TableType.MANAGED_TABLE;
+ import static com.datastrato.gravitino.catalog.hive.converter.HiveDataTypeConverter.CONVERTER;
import static com.datastrato.gravitino.rel.expressions.transforms.Transforms.identity;

import com.datastrato.gravitino.catalog.hive.HiveTablePropertiesMetadata.TableType;
- import com.datastrato.gravitino.catalog.hive.converter.FromHiveType;
- import com.datastrato.gravitino.catalog.hive.converter.ToHiveType;
import com.datastrato.gravitino.connector.BaseTable;
import com.datastrato.gravitino.connector.PropertiesMetadata;
import com.datastrato.gravitino.connector.TableOperations;
@@ -62,6 +61,8 @@ public class HiveTable extends BaseTable {
// A set of supported Hive table types.
public static final Set<String> SUPPORT_TABLE_TYPES =
Sets.newHashSet(MANAGED_TABLE.name(), EXTERNAL_TABLE.name());
+ public static final String ICEBERG_TABLE_TYPE_VALUE = "ICEBERG";
+ public static final String TABLE_TYPE_PROP = "table_type";
private String schemaName;
private CachedClientPool clientPool;
private StorageDescriptor sd;
@@ -112,15 +113,15 @@ public static HiveTable.Builder fromHiveTable(Table table) {
f ->
HiveColumn.builder()
.withName(f.getName())
- .withType(FromHiveType.convert(f.getType()))
+ .withType(CONVERTER.toGravitino(f.getType()))
.withComment(f.getComment())
.build()),
table.getPartitionKeys().stream()
.map(
p ->
HiveColumn.builder()
.withName(p.getName())
- .withType(FromHiveType.convert(p.getType()))
+ .withType(CONVERTER.toGravitino(p.getType()))
.withComment(p.getComment())
.build()))
.toArray(Column[]::new);
@@ -239,7 +240,7 @@ private FieldSchema getPartitionKey(String[] fieldName) {
.collect(Collectors.toList());
return new FieldSchema(
partitionColumns.get(0).name(),
- ToHiveType.convert(partitionColumns.get(0).dataType()).getQualifiedName(),
+ CONVERTER.fromGravitino(partitionColumns.get(0).dataType()).getQualifiedName(),
partitionColumns.get(0).comment());
}

@@ -254,7 +255,9 @@ private StorageDescriptor buildStorageDescriptor(
.map(
c ->
new FieldSchema(
- c.name(), ToHiveType.convert(c.dataType()).getQualifiedName(), c.comment()))
+ c.name(),
+ CONVERTER.fromGravitino(c.dataType()).getQualifiedName(),
+ c.comment()))
.collect(Collectors.toList()));

// `location` must not be null, otherwise it will result in an NPE when calling HMS `alterTable`
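The type-conversion refactor threaded through this file replaces the one-way FromHiveType/ToHiveType helpers with a single bidirectional CONVERTER. A hedged sketch of the contract implied by the call sites; the real HiveDataTypeConverter may differ:

import com.datastrato.gravitino.rel.types.Type;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;

// fromGravitino(...) must yield a Hive type whose getQualifiedName() is used
// when building FieldSchemas; toGravitino(...) accepts the raw type string
// carried by FieldSchema.getType().
interface HiveTypeConverterSketch {
  TypeInfo fromGravitino(Type gravitinoType);
  Type toGravitino(String hiveTypeString);
}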