Skip to content

Commit

Permalink
Skip fetching table metadata in HivePartitionManager
Browse files Browse the repository at this point in the history
  • Loading branch information
electrum committed Jun 1, 2019
1 parent a389636 commit 486fe07
Show file tree
Hide file tree
Showing 2 changed files with 15 additions and 26 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -333,7 +333,7 @@ public ConnectorTableHandle getTableHandleForStatisticsCollection(ConnectorSessi

HiveTableHandle table = handle;
return partitionValuesList
.map(values -> partitionManager.getPartitions(metastore, table, values))
.map(values -> partitionManager.getPartitions(table, values))
.map(result -> partitionManager.applyPartitionResult(table, result))
.orElse(table);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,13 +65,9 @@
import static com.google.common.base.Predicates.not;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static io.prestosql.plugin.hive.HiveBucketing.getHiveBucketFilter;
import static io.prestosql.plugin.hive.HiveBucketing.getHiveBucketHandle;
import static io.prestosql.plugin.hive.HiveErrorCode.HIVE_EXCEEDED_PARTITION_LIMIT;
import static io.prestosql.plugin.hive.HiveUtil.getPartitionKeyColumnHandles;
import static io.prestosql.plugin.hive.HiveUtil.parsePartitionValue;
import static io.prestosql.plugin.hive.metastore.MetastoreUtil.getProtectMode;
import static io.prestosql.plugin.hive.metastore.MetastoreUtil.makePartName;
import static io.prestosql.plugin.hive.metastore.MetastoreUtil.verifyOnline;
import static io.prestosql.plugin.hive.metastore.MetastoreUtil.toPartitionName;
import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED;
import static io.prestosql.spi.connector.Constraint.alwaysTrue;
import static io.prestosql.spi.predicate.TupleDomain.all;
Expand Down Expand Up @@ -126,15 +122,16 @@ public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastor
TupleDomain<ColumnHandle> effectivePredicate = constraint.getSummary();

SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
Table table = getTable(metastore, tableName);
Optional<HiveBucketHandle> hiveBucketHandle = getHiveBucketHandle(table);

List<HiveColumnHandle> partitionColumns = getPartitionKeyColumnHandles(table);
Optional<HiveBucketHandle> hiveBucketHandle = hiveTableHandle.getBucketHandle();
List<HiveColumnHandle> partitionColumns = hiveTableHandle.getPartitionColumns();

if (effectivePredicate.isNone()) {
return new HivePartitionResult(partitionColumns, ImmutableList.of(), none(), none(), none(), hiveBucketHandle, Optional.empty());
}

Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
.orElseThrow(() -> new TableNotFoundException(tableName));

Optional<HiveBucketFilter> bucketFilter = getHiveBucketFilter(table, effectivePredicate);
TupleDomain<HiveColumnHandle> compactEffectivePredicate = toCompactTupleDomain(effectivePredicate, domainCompactionThreshold);

Expand Down Expand Up @@ -168,25 +165,28 @@ public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastor
return new HivePartitionResult(partitionColumns, partitionsIterable, compactEffectivePredicate, remainingTupleDomain, enforcedTupleDomain, hiveBucketHandle, bucketFilter);
}

public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastore, ConnectorTableHandle tableHandle, List<List<String>> partitionValuesList)
public HivePartitionResult getPartitions(ConnectorTableHandle tableHandle, List<List<String>> partitionValuesList)
{
HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
List<HiveColumnHandle> partitionColumns = hiveTableHandle.getPartitionColumns();
Optional<HiveBucketHandle> bucketHandle = hiveTableHandle.getBucketHandle();

Table table = getTable(metastore, tableName);
List<String> partitionColumnNames = partitionColumns.stream()
.map(HiveColumnHandle::getName)
.collect(toImmutableList());

List<HiveColumnHandle> partitionColumns = getPartitionKeyColumnHandles(table);
List<Type> partitionColumnTypes = partitionColumns.stream()
.map(column -> typeManager.getType(column.getTypeSignature()))
.collect(toImmutableList());

List<HivePartition> partitionList = partitionValuesList.stream()
.map(partitionValues -> makePartName(table.getPartitionColumns(), partitionValues))
.map(partitionValues -> toPartitionName(partitionColumnNames, partitionValues))
.map(partitionName -> parseValuesAndFilterPartition(tableName, partitionName, partitionColumns, partitionColumnTypes, alwaysTrue()))
.map(partition -> partition.orElseThrow(() -> new VerifyException("partition must exist")))
.collect(toImmutableList());

return new HivePartitionResult(partitionColumns, partitionList, all(), all(), none(), getHiveBucketHandle(table), Optional.empty());
return new HivePartitionResult(partitionColumns, partitionList, all(), all(), none(), bucketHandle, Optional.empty());
}

public List<HivePartition> getPartitionsAsList(HivePartitionResult partitionResult)
Expand Down Expand Up @@ -272,17 +272,6 @@ private Optional<HivePartition> parseValuesAndFilterPartition(
return Optional.of(partition);
}

/**
 * Loads the table from the metastore and verifies that it is online.
 *
 * @param metastore the transactional metastore to read from
 * @param tableName schema-qualified table name to load
 * @return the metastore table, guaranteed online
 * @throws TableNotFoundException if the metastore has no such table
 */
private Table getTable(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName)
{
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    // Reject tables marked offline via their protect mode before any partition work.
    verifyOnline(tableName, Optional.empty(), getProtectMode(table), table.getParameters());
    return table;
}

private List<String> getFilteredPartitionNames(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, List<HiveColumnHandle> partitionKeys, TupleDomain<ColumnHandle> effectivePredicate)
{
checkArgument(effectivePredicate.getDomains().isPresent());
Expand Down

0 comments on commit 486fe07

Please sign in to comment.