[7.1.0] Add an option to set a minimum size threshold for zstd blob compression. (#21124)

As discussed in #18997, compression causes performance degradation for
small blobs. The flag defaults to zero (always compress) for backwards
compatibility, but the discussion suggests that 100 might be an
appropriate value to set it to.

Fixes #18997.

Commit 69eb0ec

PiperOrigin-RevId: 602353295
Change-Id: Ie9ef8c10e221e9190d3c5cb5f267fcf35a63fd3b

Co-authored-by: Googler <[email protected]>
bazel-io and tjgq authored Jan 29, 2024
1 parent 0aa0f60 commit b7b9931
Showing 3 changed files with 39 additions and 18 deletions.
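As a usage sketch (the cache endpoint is illustrative, and the threshold value of 100 is the one suggested in #18997, not a new default), the two flags combine in a .bazelrc like this:

build --remote_cache=grpcs://remote-cache.example.com
build --remote_cache_compression
build --experimental_remote_cache_compression_threshold=100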
GrpcCacheClient.java
@@ -394,16 +394,14 @@ private ListenableFuture<Long> requestRead(
CountingOutputStream rawOut,
@Nullable Supplier<Digest> digestSupplier,
Channel channel) {
boolean compressed = shouldCompress(digest);
String resourceName =
getResourceName(
options.remoteInstanceName,
digest,
options.cacheCompression,
digestUtil.getDigestFunction());
options.remoteInstanceName, digest, compressed, digestUtil.getDigestFunction());
SettableFuture<Long> future = SettableFuture.create();
OutputStream out;
try {
out = options.cacheCompression ? new ZstdDecompressingOutputStream(rawOut) : rawOut;
out = compressed ? new ZstdDecompressingOutputStream(rawOut) : rawOut;
} catch (IOException e) {
return Futures.immediateFailedFuture(e);
}
@@ -499,7 +497,7 @@ public ListenableFuture<Void> uploadFile(
digest,
Chunker.builder()
.setInput(digest.getSizeBytes(), path)
.setCompressed(options.cacheCompression)
.setCompressed(shouldCompress(digest))
.build());
}

@@ -511,7 +509,7 @@ public ListenableFuture<Void> uploadBlob(
digest,
Chunker.builder()
.setInput(data.toByteArray())
.setCompressed(options.cacheCompression)
.setCompressed(shouldCompress(digest))
.build());
}

@@ -535,6 +533,10 @@ Retrier getRetrier() {
return this.retrier;
}

private boolean shouldCompress(Digest digest) {
return options.cacheCompression && digest.getSizeBytes() >= options.cacheCompressionThreshold;
}

public ReferenceCountedChannel getChannel() {
return channel;
}
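For illustration only, a standalone sketch of the decision the new shouldCompress helper feeds into: when compression is enabled and the blob meets the threshold, the client reads from the zstd-compressed ByteStream resource name (the "compressed-blobs/zstd" path asserted in the test below); otherwise it uses the plain "blobs" path. The helper below is hypothetical, not Bazel's actual getResourceName:

static String readResourceName(
    String instanceName, String hash, long sizeBytes,
    boolean cacheCompression, long compressionThreshold) {
  // Mirror of the threshold check above: compress only if enabled and the blob is large enough.
  boolean compressed = cacheCompression && sizeBytes >= compressionThreshold;
  String blobPath =
      compressed
          ? String.format("compressed-blobs/zstd/%s/%d", hash, sizeBytes)
          : String.format("blobs/%s/%d", hash, sizeBytes);
  return instanceName.isEmpty() ? blobPath : instanceName + "/" + blobPath;
}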
RemoteOptions.java
@@ -430,9 +430,21 @@ public RemoteBuildEventUploadModeConverter() {
defaultValue = "false",
documentationCategory = OptionDocumentationCategory.REMOTE,
effectTags = {OptionEffectTag.UNKNOWN},
help = "If enabled, compress/decompress cache blobs with zstd.")
help =
"If enabled, compress/decompress cache blobs with zstd when their size is at least"
+ " --experimental_remote_cache_compression_threshold.")
public boolean cacheCompression;

@Option(
name = "experimental_remote_cache_compression_threshold",
defaultValue = "0",
documentationCategory = OptionDocumentationCategory.REMOTE,
effectTags = {OptionEffectTag.UNKNOWN},
help =
"The minimum blob size required to compress/decompress with zstd. Ineffectual unless"
+ " --remote_cache_compression is set.")
public int cacheCompressionThreshold;

@Option(
name = "build_event_upload_max_threads",
defaultValue = "100",
GrpcCacheClientTest.java
@@ -91,6 +91,8 @@
import com.google.devtools.common.options.Options;
import com.google.gson.JsonObject;
import com.google.protobuf.ByteString;
import com.google.testing.junit.testparameterinjector.TestParameter;
import com.google.testing.junit.testparameterinjector.TestParameterInjector;
import io.grpc.BindableService;
import io.grpc.CallCredentials;
import io.grpc.CallOptions;
@@ -129,14 +131,13 @@
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.mockito.ArgumentMatchers;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

/** Tests for {@link GrpcCacheClient}. */
@RunWith(JUnit4.class)
@RunWith(TestParameterInjector.class)
public class GrpcCacheClientTest {
private static final DigestUtil DIGEST_UTIL =
new DigestUtil(SyscallCache.NO_CACHE, DigestHashFunction.SHA256);
@@ -1271,36 +1272,42 @@ public void read(ReadRequest request, StreamObserver<ReadResponse> responseObserver
}

@Test
public void testCompressedDownload() throws IOException, InterruptedException {
public void testCompressedDownload(@TestParameter boolean overThreshold)
throws IOException, InterruptedException {
RemoteOptions options = Options.getDefaults(RemoteOptions.class);
options.cacheCompression = true;
options.cacheCompressionThreshold = 100;
final GrpcCacheClient client = newClient(options);
final byte[] data = "abcdefg".getBytes(UTF_8);
final byte[] data =
overThreshold ? "0123456789".repeat(10).getBytes(UTF_8) : "0123456789".getBytes(UTF_8);
final Digest digest = DIGEST_UTIL.compute(data);
final byte[] compressed = Zstd.compress(data);
final byte[] bytes = overThreshold ? Zstd.compress(data) : data;

serviceRegistry.addService(
new ByteStreamImplBase() {
@Override
public void read(ReadRequest request, StreamObserver<ReadResponse> responseObserver) {
assertThat(request.getResourceName()).contains(digest.getHash());
if (overThreshold) {
assertThat(request.getResourceName()).contains("compressed-blobs/zstd");
} else {
assertThat(request.getResourceName()).doesNotContain("compressed-blobs/zstd");
}
responseObserver.onNext(
ReadResponse.newBuilder()
.setData(ByteString.copyFrom(Arrays.copyOf(compressed, compressed.length / 3)))
.setData(ByteString.copyFrom(Arrays.copyOf(bytes, bytes.length / 3)))
.build());
responseObserver.onNext(
ReadResponse.newBuilder()
.setData(
ByteString.copyFrom(
Arrays.copyOfRange(
compressed, compressed.length / 3, compressed.length / 3 * 2)))
Arrays.copyOfRange(bytes, bytes.length / 3, bytes.length / 3 * 2)))
.build());
responseObserver.onNext(
ReadResponse.newBuilder()
.setData(
ByteString.copyFrom(
Arrays.copyOfRange(
compressed, compressed.length / 3 * 2, compressed.length)))
Arrays.copyOfRange(bytes, bytes.length / 3 * 2, bytes.length)))
.build());
responseObserver.onCompleted();
}
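The parameterized test exercises a 10-byte blob below the 100-byte threshold and a 100-byte blob at it. A standalone sketch along the same lines (using the same zstd-jni Zstd helper the test imports; payloads and printed sizes are illustrative) shows why very small blobs are not worth compressing: the zstd frame overhead typically leaves them no smaller, while the repetitive 100-byte payload shrinks noticeably.

import com.github.luben.zstd.Zstd;
import java.nio.charset.StandardCharsets;

class ZstdThresholdDemo {
  public static void main(String[] args) {
    byte[] small = "0123456789".getBytes(StandardCharsets.UTF_8);            // 10 bytes
    byte[] large = "0123456789".repeat(10).getBytes(StandardCharsets.UTF_8); // 100 bytes
    // Print original -> compressed sizes: the small payload usually ends up as large
    // as or larger than the original, while the repetitive payload compresses well.
    System.out.println(small.length + " -> " + Zstd.compress(small).length + " bytes");
    System.out.println(large.length + " -> " + Zstd.compress(large).length + " bytes");
  }
}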
