Skip to content

Commit

Permalink
Fix applyFilter when an Iceberg table does not have any snapshots
Browse files Browse the repository at this point in the history
Newly created, empty tables written by Spark may not yet have any
committed snapshot.
  • Loading branch information
alexjo2144 authored and findepi committed Aug 10, 2022
1 parent 57714f7 commit 7190dae
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -1790,10 +1790,12 @@ public Optional<ConstraintApplicationResult<ConnectorTableHandle>> applyFilter(C
else {
Table icebergTable = catalog.loadTable(session, table.getSchemaTableName());

Long snapshotId = table.getSnapshotId().orElseThrow(() -> new IllegalStateException("Snapshot id must be present"));
Set<Integer> partitionSpecIds = icebergTable.snapshot(snapshotId).allManifests(icebergTable.io()).stream()
.map(ManifestFile::partitionSpecId)
.collect(toImmutableSet());
Set<Integer> partitionSpecIds = table.getSnapshotId().map(
snapshot -> icebergTable.snapshot(snapshot).allManifests(icebergTable.io()).stream()
.map(ManifestFile::partitionSpecId)
.collect(toImmutableSet()))
// No snapshot, so no data. This case doesn't matter.
.orElseGet(() -> ImmutableSet.copyOf(icebergTable.specs().keySet()));

Map<IcebergColumnHandle, Domain> unsupported = new LinkedHashMap<>();
Map<IcebergColumnHandle, Domain> newEnforced = new LinkedHashMap<>();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@ public void testTrinoReadingSparkData(StorageFormat storageFormat, int specVersi
// Validate queries on an empty table created by Spark
assertThat(onTrino().executeQuery(format("SELECT * FROM %s", trinoTableName("\"" + baseTableName + "$snapshots\"")))).hasNoRows();
assertThat(onTrino().executeQuery(format("SELECT * FROM %s", trinoTableName))).hasNoRows();
assertThat(onTrino().executeQuery(format("SELECT * FROM %s WHERE _integer > 0", trinoTableName))).hasNoRows();

onSpark().executeQuery(format(
"INSERT INTO %s VALUES (" +
Expand Down

0 comments on commit 7190dae

Please sign in to comment.