From 09043c5587ae9e6adeb425f5c2f7c4f65ec5e20e Mon Sep 17 00:00:00 2001 From: BenWhitehead Date: Wed, 17 Apr 2024 18:48:07 -0400 Subject: [PATCH] fix: update Grpc Retry Conformance after new additions to testbench (#2309) --- .../storage/GrpcRetryAlgorithmManager.java | 4 +- .../google/cloud/storage/GrpcStorageImpl.java | 63 +++- .../ITGrpcStorageImplUploadRetryTest.java | 310 ------------------ .../PackagePrivateMethodWorkarounds.java | 17 +- .../retry/ITRetryConformanceTest.java | 7 +- .../conformance/retry/RpcMethodMapping.java | 10 +- .../conformance/retry/RpcMethodMappings.java | 5 +- .../retry/TestRetryConformance.java | 26 +- 8 files changed, 87 insertions(+), 355 deletions(-) delete mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/ITGrpcStorageImplUploadRetryTest.java diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcRetryAlgorithmManager.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcRetryAlgorithmManager.java index de7af195b9..3a97c2e952 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcRetryAlgorithmManager.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcRetryAlgorithmManager.java @@ -86,11 +86,11 @@ public ResultRetryAlgorithm getFor(CreateNotificationConfigRequest req) { } public ResultRetryAlgorithm getFor(DeleteBucketRequest req) { - return retryStrategy.getNonidempotentHandler(); + return retryStrategy.getIdempotentHandler(); } public ResultRetryAlgorithm getFor(DeleteHmacKeyRequest req) { - return retryStrategy.getNonidempotentHandler(); + return retryStrategy.getIdempotentHandler(); } public ResultRetryAlgorithm getFor(DeleteNotificationConfigRequest req) { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java index e5e81c060e..c195ce78f3 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java @@ -262,8 +262,32 @@ public Blob create( @Override public Blob create(BlobInfo blobInfo, InputStream content, BlobWriteOption... 
options) { try { - return createFrom(blobInfo, content, options); - } catch (IOException e) { + requireNonNull(blobInfo, "blobInfo must be non null"); + InputStream inputStreamParam = firstNonNull(content, new ByteArrayInputStream(ZERO_BYTES)); + + Opts optsWithDefaults = Opts.unwrap(options).prepend(defaultOpts); + GrpcCallContext grpcCallContext = + optsWithDefaults.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); + WriteObjectRequest req = getWriteObjectRequest(blobInfo, optsWithDefaults); + Hasher hasher = Hasher.enabled(); + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + UnbufferedWritableByteChannelSession session = + ResumableMedia.gapic() + .write() + .byteChannel(storageClient.writeObjectCallable().withDefaultCallContext(merge)) + .setByteStringStrategy(ByteStringStrategy.noCopy()) + .setHasher(hasher) + .direct() + .unbuffered() + .setRequest(req) + .build(); + + try (UnbufferedWritableByteChannel c = session.open()) { + ByteStreams.copy(Channels.newChannel(inputStreamParam), c); + } + ApiFuture responseApiFuture = session.getResult(); + return this.getBlob(responseApiFuture); + } catch (IOException | ApiException e) { throw StorageException.coalesce(e); } } @@ -549,17 +573,20 @@ public boolean delete(String bucket, BucketSourceOption... options) { DeleteBucketRequest.Builder builder = DeleteBucketRequest.newBuilder().setName(bucketNameCodec.encode(bucket)); DeleteBucketRequest req = opts.deleteBucketsRequest().apply(builder).build(); - try { - GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); - Retrying.run( - getOptions(), - retryAlgorithmManager.getFor(req), - () -> storageClient.deleteBucketCallable().call(req, merge), - Decoder.identity()); - return true; - } catch (StorageException e) { - return false; - } + GrpcCallContext merge = Utils.merge(grpcCallContext, Retrying.newCallContext()); + return Boolean.TRUE.equals( + Retrying.run( + getOptions(), + retryAlgorithmManager.getFor(req), + () -> { + try { + storageClient.deleteBucketCallable().call(req, merge); + return true; + } catch (NotFoundException e) { + return false; + } + }, + Decoder.identity())); } @Override @@ -760,11 +787,19 @@ public GrpcBlobWriteChannel writer(BlobInfo blobInfo, BlobWriteOption... options opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); WriteObjectRequest req = getWriteObjectRequest(blobInfo, opts); Hasher hasher = Hasher.noop(); + // in JSON, the starting of the resumable session happens before the invocation of write can + // happen. Emulate the same thing here. + // 1. create the future + ApiFuture startResumableWrite = startResumableWrite(grpcCallContext, req); + // 2. await the result of the future + ResumableWrite resumableWrite = ApiFutureUtils.await(startResumableWrite); + // 3. 
wrap the result in another future container before constructing the BlobWriteChannel + ApiFuture wrapped = ApiFutures.immediateFuture(resumableWrite); return new GrpcBlobWriteChannel( storageClient.writeObjectCallable(), getOptions(), retryAlgorithmManager.idempotent(), - () -> startResumableWrite(grpcCallContext, req), + () -> wrapped, hasher); } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGrpcStorageImplUploadRetryTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGrpcStorageImplUploadRetryTest.java deleted file mode 100644 index 12f1bd6b9a..0000000000 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITGrpcStorageImplUploadRetryTest.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.storage; - -import static com.google.cloud.storage.ByteSizeConstants._2MiB; -import static com.google.common.truth.Truth.assertThat; - -import com.google.api.core.ApiFuture; -import com.google.api.gax.grpc.GrpcCallContext; -import com.google.cloud.storage.ITGapicUnbufferedWritableByteChannelTest.DirectWriteService; -import com.google.cloud.storage.Storage.BlobTargetOption; -import com.google.cloud.storage.Storage.BlobWriteOption; -import com.google.common.collect.ImmutableList; -import com.google.protobuf.ByteString; -import com.google.storage.v2.ChecksummedData; -import com.google.storage.v2.Object; -import com.google.storage.v2.ObjectChecksums; -import com.google.storage.v2.QueryWriteStatusRequest; -import com.google.storage.v2.QueryWriteStatusResponse; -import com.google.storage.v2.StartResumableWriteRequest; -import com.google.storage.v2.StartResumableWriteResponse; -import com.google.storage.v2.StorageGrpc.StorageImplBase; -import com.google.storage.v2.WriteObjectRequest; -import com.google.storage.v2.WriteObjectResponse; -import com.google.storage.v2.WriteObjectSpec; -import io.grpc.Status.Code; -import io.grpc.stub.StreamObserver; -import java.io.InputStream; -import java.nio.channels.Channels; -import java.nio.file.Path; -import java.util.concurrent.atomic.AtomicBoolean; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -/** - * Verify some simplistic retries can take place and that we're constructing {@link - * WriteObjectRequest}s as expected. 
- */ -public final class ITGrpcStorageImplUploadRetryTest { - private static final String FORMATTED_BUCKET_NAME = "projects/_/buckets/buck"; - private static final int objectContentSize = 64; - private static final byte[] bytes = DataGenerator.base64Characters().genBytes(objectContentSize); - - @Rule public final TemporaryFolder tmpDir = new TemporaryFolder(); - - private Path baseDir; - - @Before - public void setUp() throws Exception { - baseDir = tmpDir.getRoot().toPath(); - } - - @Test - public void create_bytes() throws Exception { - Direct.FakeService service = Direct.FakeService.create(); - - try (FakeServer server = FakeServer.of(service); - Storage s = server.getGrpcStorageOptions().getService()) { - BlobInfo info = BlobInfo.newBuilder("buck", "obj").build(); - s.create(info, bytes, BlobTargetOption.doesNotExist()); - } - - assertThat(service.returnError.get()).isFalse(); - } - - @Test - public void create_inputStream() throws Exception { - Resumable.FakeService service = Resumable.FakeService.create(); - try (TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); - FakeServer server = FakeServer.of(service); - Storage s = server.getGrpcStorageOptions().getService(); - InputStream in = Channels.newInputStream(tmpFile.reader())) { - BlobInfo info = BlobInfo.newBuilder("buck", "obj").build(); - s.create(info, in, BlobWriteOption.doesNotExist()); - } - - assertThat(service.returnError.get()).isFalse(); - } - - @Test - public void createFrom_path_smallerThanBufferSize() throws Exception { - Resumable.FakeService service = Resumable.FakeService.create(); - - try (TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); - FakeServer server = FakeServer.of(service); - Storage s = server.getGrpcStorageOptions().getService()) { - BlobInfo info = BlobInfo.newBuilder("buck", "obj").build(); - s.createFrom(info, tmpFile.getPath(), _2MiB, BlobWriteOption.doesNotExist()); - } - - assertThat(service.returnError.get()).isFalse(); - } - - @Test - public void createFrom_path_largerThanBufferSize() throws Exception { - Resumable.FakeService service = Resumable.FakeService.create(); - try (TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); - FakeServer server = FakeServer.of(service); - Storage s = server.getGrpcStorageOptions().getService()) { - BlobInfo info = BlobInfo.newBuilder("buck", "obj").build(); - s.createFrom(info, tmpFile.getPath(), 16, BlobWriteOption.doesNotExist()); - } - - assertThat(service.returnError.get()).isFalse(); - } - - @Test - public void createFrom_inputStream() throws Exception { - Resumable.FakeService service = Resumable.FakeService.create(); - try (TmpFile tmpFile = DataGenerator.base64Characters().tempFile(baseDir, objectContentSize); - FakeServer server = FakeServer.of(service); - Storage s = server.getGrpcStorageOptions().getService(); - InputStream in = Channels.newInputStream(tmpFile.reader())) { - BlobInfo info = BlobInfo.newBuilder("buck", "obj").build(); - s.createFrom(info, in, BlobWriteOption.doesNotExist()); - } - - assertThat(service.returnError.get()).isFalse(); - } - - @Test - public void startResumableWrite() throws Exception { - - AtomicBoolean returnError = new AtomicBoolean(true); - StorageImplBase service = - new StorageImplBase() { - @Override - public void startResumableWrite( - StartResumableWriteRequest request, StreamObserver obs) { - if (request.equals(Resumable.startReq)) { - if (returnError.get()) { - // clear the need to error. 
We only error on the first request. - returnError.compareAndSet(true, false); - obs.onError(TestUtils.apiException(Code.INTERNAL, "should retry")); - } else { - obs.onNext(Resumable.startResp); - obs.onCompleted(); - } - } else { - obs.onError( - TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); - } - } - }; - - try (FakeServer server = FakeServer.of(service); - GrpcStorageImpl s = (GrpcStorageImpl) server.getGrpcStorageOptions().getService()) { - ApiFuture f = - s.startResumableWrite(GrpcCallContext.createDefault(), Resumable.baseReq); - ResumableWrite resumableWrite = f.get(); - StartResumableWriteResponse resp = resumableWrite.getRes(); - assertThat(resp).isNotNull(); - assertThat(resp.getUploadId()).isEqualTo(Resumable.uploadId); - } - - assertThat(returnError.get()).isFalse(); - } - - private static final class Direct { - private static final Object obj = - Object.newBuilder().setBucket(FORMATTED_BUCKET_NAME).setName("obj").build(); - private static final WriteObjectSpec spec = - WriteObjectSpec.newBuilder().setResource(obj).setIfGenerationMatch(0).build(); - - private static final ChecksummedData checksummedData = - TestUtils.getChecksummedData(ByteString.copyFrom(bytes), Hasher.enabled()); - private static final WriteObjectRequest req1 = - WriteObjectRequest.newBuilder() - .setWriteObjectSpec(spec) - .setChecksummedData(checksummedData) - .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(checksummedData.getCrc32C())) - .setFinishWrite(true) - .build(); - private static final WriteObjectResponse resp1 = - WriteObjectResponse.newBuilder() - .setResource(obj.toBuilder().setSize(objectContentSize)) - .build(); - - private static final class FakeService extends DirectWriteService { - private final AtomicBoolean returnError; - - private FakeService(AtomicBoolean returnError) { - super( - (obs, reqs) -> { - if (reqs.equals(ImmutableList.of(req1))) { - if (returnError.get()) { - // clear the need to error. We only error on the first request. - returnError.compareAndSet(true, false); - obs.onError(TestUtils.apiException(Code.INTERNAL, "should retry")); - } else { - obs.onNext(resp1); - obs.onCompleted(); - } - } else { - obs.onError( - TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); - } - }); - this.returnError = returnError; - } - - // a bit of constructor lifecycle hackery to appease the compiler - // Even though the thing past to super() is a lazy function, the closing over of the outer - // fields happens earlier than they are available. To side step this fact, we provide the - // AtomicBoolean as a constructor argument which can be closed over without issue, and then - // bind it to the class field after super(). 
- static Direct.FakeService create() { - return new Direct.FakeService(new AtomicBoolean(true)); - } - } - } - - private static final class Resumable { - - private static final String uploadId = "upload-id"; - - private static final Object obj = - Object.newBuilder().setBucket(FORMATTED_BUCKET_NAME).setName("obj").build(); - private static final WriteObjectSpec spec = - WriteObjectSpec.newBuilder().setResource(obj).setIfGenerationMatch(0).build(); - - private static final WriteObjectRequest baseReq = - WriteObjectRequest.newBuilder().setWriteObjectSpec(spec).build(); - private static final StartResumableWriteRequest startReq = - StartResumableWriteRequest.newBuilder().setWriteObjectSpec(spec).build(); - private static final StartResumableWriteResponse startResp = - StartResumableWriteResponse.newBuilder().setUploadId(uploadId).build(); - - private static final ChecksummedData checksummedData = - TestUtils.getChecksummedData(ByteString.copyFrom(bytes), Hasher.noop()); - private static final WriteObjectRequest req1 = - WriteObjectRequest.newBuilder() - .setUploadId(uploadId) - .setChecksummedData(checksummedData) - .setFinishWrite(true) - .build(); - private static final WriteObjectResponse resp1 = - WriteObjectResponse.newBuilder() - .setResource(obj.toBuilder().setSize(objectContentSize)) - .build(); - - private static final class FakeService extends DirectWriteService { - private final AtomicBoolean returnError; - - private FakeService(AtomicBoolean returnError) { - super( - (obs, reqs) -> { - if (reqs.equals(ImmutableList.of(req1))) { - if (returnError.get()) { - // clear the need to error. We only error on the first request. - returnError.compareAndSet(true, false); - obs.onError(TestUtils.apiException(Code.INTERNAL, "should retry")); - } else { - obs.onNext(resp1); - obs.onCompleted(); - } - } else { - obs.onError( - TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); - } - }); - this.returnError = returnError; - } - - @Override - public void startResumableWrite( - StartResumableWriteRequest request, StreamObserver obs) { - if (request.equals(startReq)) { - obs.onNext(startResp); - obs.onCompleted(); - } else { - obs.onError(TestUtils.apiException(Code.PERMISSION_DENIED, "Unexpected request chain.")); - } - } - - @Override - public void queryWriteStatus( - QueryWriteStatusRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(QueryWriteStatusResponse.newBuilder().setPersistedSize(0).build()); - responseObserver.onCompleted(); - } - - // a bit of constructor lifecycle hackery to appease the compiler - // Even though the thing passed to super() is a lazy function, the closing over of the outer - // fields happens earlier than they are available. To side step this fact, we provide the - // AtomicBoolean as a constructor argument which can be closed over without issue, and then - // bind it to the class field after super(). 
- static FakeService create() { - return new FakeService(new AtomicBoolean(true)); - } - } - } -} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java index 9b577014c5..23b7f3710e 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java @@ -20,7 +20,6 @@ import com.google.api.core.ApiFutures; import com.google.cloud.ReadChannel; import com.google.cloud.WriteChannel; -import com.google.cloud.storage.BucketInfo.BuilderImpl; import com.google.common.collect.ImmutableList; import java.util.Optional; import java.util.concurrent.ExecutionException; @@ -40,23 +39,11 @@ public final class PackagePrivateMethodWorkarounds { private PackagePrivateMethodWorkarounds() {} public static Bucket bucketCopyWithStorage(Bucket b, Storage s) { - BucketInfo.BuilderImpl builder = - (BuilderImpl) - Conversions.json() - .bucketInfo() - .decode(Conversions.json().bucketInfo().encode(b)) - .toBuilder(); - return new Bucket(s, builder); + return b.asBucket(s); } public static Blob blobCopyWithStorage(Blob b, Storage s) { - BlobInfo.BuilderImpl builder = - (BlobInfo.BuilderImpl) - Conversions.json() - .blobInfo() - .decode(Conversions.json().blobInfo().encode(b)) - .toBuilder(); - return new Blob(s, builder); + return b.asBlob(s); } public static Function> maybeGetBlobInfoFunction() { diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/ITRetryConformanceTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/ITRetryConformanceTest.java index 8957473484..8d0a24a86f 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/ITRetryConformanceTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/ITRetryConformanceTest.java @@ -19,6 +19,7 @@ import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.blobCopyWithStorage; import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.bucketCopyWithStorage; import static com.google.cloud.storage.conformance.retry.Ctx.ctx; +import static com.google.cloud.storage.conformance.retry.ITRetryConformanceTest.RetryTestCaseResolver.lift; import static com.google.cloud.storage.conformance.retry.State.empty; import static com.google.common.truth.Truth.assertThat; import static java.util.Objects.requireNonNull; @@ -117,6 +118,7 @@ public void setUp() throws Throwable { public void tearDown() throws Throwable { LOGGER.fine("Running teardown..."); if (ctx != null) { + ctx = ctx.leftMap(s -> nonTestStorage); getReplaceStorageInObjectsFromCtx() .andThen(mapping.getTearDown()) .apply(ctx, testRetryConformance); @@ -168,7 +170,10 @@ public ImmutableList parameters() { .setHost(testBench.getBaseUri().replaceAll("https?://", "")) .setTestAllowFilter( RetryTestCaseResolver.includeAll() - .and(RetryTestCaseResolver.lift(trc -> trc.getTransport() == Transport.HTTP))) + .and( + (lift(trc -> trc.getTransport() == Transport.GRPC) + .and((m, trc) -> m == RpcMethod.storage.buckets.setIamPolicy)) + .negate())) .build(); List retryTestCases; diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMapping.java 
b/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMapping.java index 1c3947adbf..6ecc0068d0 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMapping.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMapping.java @@ -21,6 +21,7 @@ import static org.junit.Assert.fail; import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.TransportCompatibility.Transport; import com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceSetup; import com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceTeardown; import com.google.cloud.storage.conformance.retry.Functions.CtxFunction; @@ -42,7 +43,6 @@ */ @Immutable final class RpcMethodMapping { - private final int mappingId; private final RpcMethod method; private final Predicate applicable; @@ -107,6 +107,14 @@ public CtxFunction getTest() { if (instructions.contains("return-reset-connection") && code == 0) { matchExpectedCode = true; } + // testbench resetting the connection is turned into an UNAVAILABLE in grpc, which we then + // map to 503. Add graceful handling here, since we can't disambiguate between reset + // connection and 503 from the service. + if (c.getTransport() == Transport.GRPC + && instructions.contains("return-reset-connection") + && code == 503) { + matchExpectedCode = true; + } if (matchExpectedCode) { return ctx; diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMappings.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMappings.java index 9dc246373c..3f98a94fa4 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMappings.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/RpcMethodMappings.java @@ -53,6 +53,7 @@ import com.google.cloud.storage.Storage.SignUrlOption; import com.google.cloud.storage.Storage.UriScheme; import com.google.cloud.storage.StorageRoles; +import com.google.cloud.storage.TransportCompatibility.Transport; import com.google.cloud.storage.conformance.retry.CtxFunctions.Local; import com.google.cloud.storage.conformance.retry.CtxFunctions.ResourceSetup; import com.google.cloud.storage.conformance.retry.CtxFunctions.Rpc; @@ -1607,7 +1608,9 @@ private static void insert(ArrayList a) { .build()); a.add( RpcMethodMapping.newBuilder(54, objects.insert) - .withApplicable(not(TestRetryConformance::isPreconditionsProvided)) + .withApplicable( + not(TestRetryConformance::isPreconditionsProvided) + .and(trc -> trc.getTransport() == Transport.HTTP)) .withSetup(defaultSetup.andThen(Local.blobInfoWithoutGeneration)) .withTest( (ctx, c) -> diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/TestRetryConformance.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/TestRetryConformance.java index 65c0734807..4f1afd83e5 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/TestRetryConformance.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/conformance/retry/TestRetryConformance.java @@ -27,7 +27,6 @@ import com.google.cloud.storage.DataGenerator; import com.google.cloud.storage.TransportCompatibility.Transport; import com.google.common.base.Joiner; -import com.google.common.base.Suppliers; import com.google.common.io.ByteStreams; import 
com.google.errorprone.annotations.Immutable; import java.io.ByteArrayInputStream; @@ -42,6 +41,7 @@ import java.time.ZoneId; import java.time.ZoneOffset; import java.time.format.DateTimeFormatter; +import java.util.List; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -153,13 +153,11 @@ final class TestRetryConformance { String.format( "%s_s%03d-%s-m%03d_top1_%s", BASE_ID, scenarioId, instructionsString.toLowerCase(), mappingId, transportTag); + // define a lazy supplier for bytes. this.lazyHelloWorldUtf8Bytes = - Suppliers.memoize( - () -> { - // define a lazy supplier for bytes. - return genBytes(method); - }); - this.helloWorldFilePath = resolvePathForResource(objectName, method); + () -> genBytes(this.method, this.instruction.getInstructionsList()); + this.helloWorldFilePath = + resolvePathForResource(objectName, method, this.instruction.getInstructionsList()); this.serviceAccountCredentials = resolveServiceAccountCredentials(); } @@ -239,13 +237,14 @@ public String toString() { return getTestName(); } - private static Supplier resolvePathForResource(String objectName, Method method) { + private static Supplier resolvePathForResource( + String objectName, Method method, List instructionList) { return () -> { try { File tempFile = File.createTempFile(objectName, ""); tempFile.deleteOnExit(); - byte[] bytes = genBytes(method); + byte[] bytes = genBytes(method, instructionList); try (ByteArrayInputStream in = new ByteArrayInputStream(bytes); FileOutputStream out = new FileOutputStream(tempFile)) { long copy = ByteStreams.copy(in, out); @@ -276,14 +275,19 @@ public String getTopicName() { return topicName; } - private static byte[] genBytes(Method method) { + private static byte[] genBytes(Method method, List instructionsList) { // Not all tests need data for an object, though some tests - resumable upload - needs // more than 8MiB. // We want to avoid allocating 8.1MiB for each test unnecessarily, especially since we // instantiate all permuted test cases. ~1000 * 8.1MiB ~~ > 8GiB. switch (method.getName()) { case "storage.objects.insert": - return DataGenerator.base64Characters().genBytes(_8MiB * 2 + _512KiB); + boolean after8m = instructionsList.stream().anyMatch(s -> s.endsWith("after-8192K")); + if (after8m) { + return DataGenerator.base64Characters().genBytes(_8MiB * 2 + _512KiB); + } else { + return DataGenerator.base64Characters().genBytes(_512KiB); + } case "storage.objects.get": return DataGenerator.base64Characters().genBytes(_512KiB); default:
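
A minimal caller-side sketch of the Storage#create(BlobInfo, InputStream, BlobWriteOption...) overload whose gRPC implementation the first GrpcStorageImpl hunk rewrites to issue a direct WriteObject stream instead of delegating to createFrom. This is illustrative only: the bucket/object names and the example class are placeholders, and StorageOptions.grpc() is assumed as the gRPC-transport entry point; the create(...) call and BlobWriteOption.doesNotExist() are the same API surface exercised by the deleted ITGrpcStorageImplUploadRetryTest above.

import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.Storage.BlobWriteOption;
import com.google.cloud.storage.StorageOptions;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public final class CreateFromStreamSketch {
  public static void main(String[] args) throws Exception {
    // Storage is AutoCloseable; StorageOptions.grpc() selects the gRPC transport,
    // whose create(BlobInfo, InputStream, ...) path is the one rewritten in this patch.
    try (Storage storage = StorageOptions.grpc().build().getService()) {
      BlobInfo info = BlobInfo.newBuilder(BlobId.of("my-bucket", "my-object")).build();
      InputStream content =
          new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8));
      // doesNotExist() adds an ifGenerationMatch(0) precondition, which is what makes
      // the upload idempotent and therefore eligible for the retry handlers that the
      // conformance suite exercises.
      storage.create(info, content, BlobWriteOption.doesNotExist());
    }
  }
}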