Remove redundant use of Optional for target_max_file_size
The `target_max_file_size` Hive session property has a non-null default
value, so it can never be null. The default can be configured via catalog
config, which doesn't allow unsetting the value.
findepi committed Feb 8, 2022
1 parent 6588a9e commit 24ffb79
Showing 1 changed file with 3 additions and 5 deletions.
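
For context, a minimal sketch of how such a session property with a non-null default might be declared and read. This is not the actual Trino source; the class name, the description string, and the helper choice are assumptions. The point it illustrates is the one the commit message makes: the default comes from catalog config and is itself non-null, so the getter below can never return null and callers don't need Optional.

// A minimal sketch, not the actual Trino source. SketchSessionProperties and
// defaultFromConfig are illustrative names.
import io.airlift.units.DataSize;
import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.session.PropertyMetadata;

import static io.trino.plugin.base.session.PropertyMetadataUtil.dataSizeProperty;

public final class SketchSessionProperties
{
    public static final String TARGET_MAX_FILE_SIZE = "target_max_file_size";

    private SketchSessionProperties() {}

    public static PropertyMetadata<DataSize> targetMaxFileSizeProperty(DataSize defaultFromConfig)
    {
        // The default is supplied by catalog config and is non-null, so reads
        // of this property always yield a value
        return dataSizeProperty(
                TARGET_MAX_FILE_SIZE,
                "Target maximum size of written files",
                defaultFromConfig,
                false);
    }

    public static DataSize getTargetMaxFileSize(ConnectorSession session)
    {
        // Returns the session override or the non-null default, never null
        return session.getProperty(TARGET_MAX_FILE_SIZE, DataSize.class);
    }
}

Because the property metadata carries the default, consumers can hold a plain long (via toBytes()) instead of an OptionalLong, which is exactly what the diff below does.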
@@ -23,7 +23,6 @@
 import io.airlift.json.JsonCodec;
 import io.airlift.log.Logger;
 import io.airlift.slice.Slice;
-import io.airlift.units.DataSize;
 import io.trino.plugin.hive.util.HiveBucketing.BucketingVersion;
 import io.trino.spi.Page;
 import io.trino.spi.PageIndexer;
@@ -44,7 +43,6 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.OptionalInt;
-import java.util.OptionalLong;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.Executors;
@@ -87,7 +85,7 @@ public class HivePageSink
 
     private final ConnectorSession session;
 
-    private final OptionalLong targetMaxFileSize;
+    private final long targetMaxFileSize;
     private final List<HiveWriter> closedWriters = new ArrayList<>();
     private final List<Slice> partitionUpdates = new ArrayList<>();
     private final List<Callable<Object>> verificationTasks = new ArrayList<>();
@@ -164,7 +162,7 @@ public HivePageSink(
         }
 
         this.session = requireNonNull(session, "session is null");
-        this.targetMaxFileSize = Optional.ofNullable(HiveSessionProperties.getTargetMaxFileSize(session)).stream().mapToLong(DataSize::toBytes).findAny();
+        this.targetMaxFileSize = HiveSessionProperties.getTargetMaxFileSize(session).toBytes();
    }
 
     @Override
@@ -366,7 +364,7 @@ private int[] getWriterIndexes(Page page)
             // if current file not too big continue with the current writer
             // for transactional tables we don't want to split output files because there is an explicit or implicit bucketing
             // and file names have no random component (e.g. bucket_00000)
-            if (bucketFunction != null || isTransactional || writer.getWrittenBytes() <= targetMaxFileSize.orElse(Long.MAX_VALUE)) {
+            if (bucketFunction != null || isTransactional || writer.getWrittenBytes() <= targetMaxFileSize) {
                 continue;
             }
             // close current writer
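
To make the rule guarded by the last hunk concrete, here is a small standalone sketch. The class and field names are illustrative, not Trino's: bucketed and transactional outputs have fixed file names, so they are never split, while other writes roll to a new file once the current one exceeds the target size, now held as a plain long.

// A standalone sketch of the rollover decision, with illustrative names;
// not the actual HivePageSink implementation.
final class RolloverSketch
{
    private final long targetMaxFileSize; // bytes; always set, thanks to the non-null default
    private final boolean bucketed;       // stands in for bucketFunction != null
    private final boolean transactional;  // stands in for isTransactional

    RolloverSketch(long targetMaxFileSize, boolean bucketed, boolean transactional)
    {
        this.targetMaxFileSize = targetMaxFileSize;
        this.bucketed = bucketed;
        this.transactional = transactional;
    }

    boolean shouldRollToNewFile(long writtenBytes)
    {
        // Bucketed and transactional outputs have deterministic file names
        // (e.g. bucket_00000) and must not be split across files; everything
        // else rolls over once the target size is exceeded
        return !bucketed && !transactional && writtenBytes > targetMaxFileSize;
    }
}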
