From d85185f910253a025d7f1156dcc025325c36cab9 Mon Sep 17 00:00:00 2001 From: Alan Zimmer <48699787+alzimmermsft@users.noreply.github.com> Date: Fri, 2 Aug 2019 10:59:06 -0700 Subject: [PATCH] Migrate Additional V11 Test Classes (#4491) * Removed raw clients * Fixed PageBlob getRange and getRangeDiff, turned on all unit tests * Turned on more tests * Partial work for implementing and testing reliable download options * Storage SAS implementation (#4404) * SAS implementation * Fixed some minor formatting issues * Fixed checkstyle problems and test issue * Turned on download retry test * RetryTests migrated * Passing most of HelperTest * Fixing tests after merge * Remove RawClients from Blobs (#4375) Removes RawClients from Storage Blobs * DownloadResponseTest working * Fixed scoping for some classes, Checkstyle fixes * Fixing more unit tests * Fix Checkstyle issue * Move adding acount name into String.format * Additional move of adding account name into String.format * Fixed assertGenerateOK logic for SAS generation * Removed invalid import * Fixed method parameter ordering --- .../storage/blob/AccountSASPermission.java | 65 +- .../storage/blob/AccountSASResourceType.java | 27 +- .../azure/storage/blob/AccountSASService.java | 32 +- .../blob/AccountSASSignatureValues.java | 75 +- .../blob/AnonymousCredentialPolicy.java | 32 - .../azure/storage/blob/BlobClientBuilder.java | 2 - .../azure/storage/blob/BlobConfiguration.java | 4 +- .../azure/storage/blob/BlobOutputStream.java | 2 +- .../azure/storage/blob/BlobProperties.java | 14 +- .../azure/storage/blob/BlobSASPermission.java | 39 +- .../blob/BlobServiceClientBuilder.java | 2 - .../storage/blob/ContainerAsyncClient.java | 3 +- .../storage/blob/ContainerClientBuilder.java | 2 - .../storage/blob/ContainerProperties.java | 20 +- .../storage/blob/ContainerSASPermission.java | 46 +- .../storage/blob/DownloadAsyncResponse.java | 53 +- .../azure/storage/blob/DownloadResponse.java | 1 - 
.../azure/storage/blob/HTTPGetterInfo.java | 35 +- .../java/com/azure/storage/blob/IPRange.java | 22 +- .../azure/storage/blob/IProgressReceiver.java | 10 +- .../storage/blob/PageBlobAsyncClient.java | 13 +- .../azure/storage/blob/PageBlobClient.java | 17 +- .../azure/storage/blob/ProgressReporter.java | 69 +- .../com/azure/storage/blob/SASProtocol.java | 2 +- .../storage/blob/SASQueryParameters.java | 78 +- .../blob/ServiceSASSignatureValues.java | 162 ++-- .../blob/UnexpectedLengthException.java | 32 + .../blob/models/BlobHierarchyListSegment.java | 3 + .../azure/storage/blob/models/BlobRange.java | 7 +- .../common/policy/RequestRetryOptions.java | 104 +-- .../common/policy/RequestRetryPolicy.java | 157 ++-- .../BlobAsyncClientJavaDocCodeSnippets.java | 4 +- .../blob/BlobClientJavaDocCodeSnippets.java | 4 +- .../com/azure/storage/blob/APISpec.groovy | 72 +- .../com/azure/storage/blob/AadLoginTest.java | 31 - .../storage/blob/AppendBlobAPITest.groovy | 3 + .../com/azure/storage/blob/BlobAPITest.groovy | 106 +-- .../storage/blob/BlockBlobAPITest.groovy | 20 +- .../blob/DownloadResponseMockFlux.java | 214 +++++ .../storage/blob/DownloadResponseTest.groovy | 152 ++++ .../com/azure/storage/blob/HelperTest.groovy | 730 ++++++++++++++++++ .../com/azure/storage/blob/LargeFileTest.java | 54 -- .../azure/storage/blob/PageBlobAPITest.groovy | 100 +-- .../storage/blob/ProgressReporterTest.groovy | 109 +++ .../storage/blob/RequestRetryTestFactory.java | 430 +++++++++++ .../com/azure/storage/blob/RetryTest.groovy | 144 ++++ .../com/azure/storage/blob/SASTest.groovy | 151 ++-- 47 files changed, 2662 insertions(+), 792 deletions(-) delete mode 100644 sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AnonymousCredentialPolicy.java create mode 100644 sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/UnexpectedLengthException.java delete mode 100644 sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AadLoginTest.java create mode 
100644 sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseMockFlux.java create mode 100644 sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseTest.groovy create mode 100644 sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/HelperTest.groovy delete mode 100644 sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/LargeFileTest.java create mode 100644 sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/ProgressReporterTest.groovy create mode 100644 sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RequestRetryTestFactory.java create mode 100644 sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RetryTest.groovy diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASPermission.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASPermission.java index 11369aabed0d7..4af9f970ff647 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASPermission.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASPermission.java @@ -12,7 +12,7 @@ * {@link AccountSASSignatureValues} object. It is possible to construct the permissions string without this class, but * the order of the permissions is particular and this class guarantees correctness. */ -final class AccountSASPermission { +public final class AccountSASPermission { private boolean read; @@ -33,7 +33,7 @@ final class AccountSASPermission { /** * Initializes an {@code AccountSASPermission} object with all fields set to false. */ - private AccountSASPermission() { + public AccountSASPermission() { } /** @@ -85,14 +85,17 @@ public static AccountSASPermission parse(String permString) { } /** - * Permission to read resources and list queues and tables granted. 
+ * @return the read permission status */ public boolean read() { return read; } /** - * Permission to read resources and list queues and tables granted. + * Sets the read permission status. + * + * @param read Permission status to set + * @return the updated AccountSASPermission object */ public AccountSASPermission read(boolean read) { this.read = read; @@ -100,14 +103,17 @@ public AccountSASPermission read(boolean read) { } /** - * Permission to add messages, table entities, and append to blobs granted. + * @return the add permission status */ public boolean add() { return add; } /** - * Permission to add messages, table entities, and append to blobs granted. + * Sets the add permission status. + * + * @param add Permission status to set + * @return the updated AccountSASPermission object */ public AccountSASPermission add(boolean add) { this.add = add; @@ -115,14 +121,17 @@ public AccountSASPermission add(boolean add) { } /** - * Permission to create blobs and files granted. + * @return the create permission status */ public boolean create() { return create; } /** - * Permission to create blobs and files granted. + * Sets the create permission status. + * + * @param create Permission status to set + * @return the updated AccountSASPermission object */ public AccountSASPermission create(boolean create) { this.create = create; @@ -130,14 +139,17 @@ public AccountSASPermission create(boolean create) { } /** - * Permission to write resources granted. + * @return the write permission status */ public boolean write() { return write; } /** - * Permission to write resources granted. + * Sets the write permission status. + * + * @param write Permission status to set + * @return the updated AccountSASPermission object */ public AccountSASPermission write(boolean write) { this.write = write; @@ -145,14 +157,17 @@ public AccountSASPermission write(boolean write) { } /** - * Permission to delete resources granted. 
+ * @return the delete permission status */ public boolean delete() { return delete; } /** - * Permission to delete resources granted. + * Sets the delete permission status. + * + * @param delete Permission status to set + * @return the updated AccountSASPermission object */ public AccountSASPermission delete(boolean delete) { this.delete = delete; @@ -160,14 +175,18 @@ public AccountSASPermission delete(boolean delete) { } /** - * Permission to list blob containers, blobs, shares, directories, and files granted. + * @return the list permission status */ public boolean list() { return list; } /** - * Permission to list blob containers, blobs, shares, directories, and files granted. + * Sets the list permission status. This permission grants the ability to list blob containers, blobs, shares, + * directories, and files. + * + * @param list Permission status to set + * @return the updated AccountSASPermission object */ public AccountSASPermission list(boolean list) { this.list = list; @@ -175,14 +194,19 @@ public AccountSASPermission list(boolean list) { } /** - * Permissions to update messages and table entities granted. + * Returns the update permission status, it allows the update of queue message and tables. + * + * @return the update permission status */ public boolean update() { return update; } /** - * Permissions to update messages and table entities granted. + * Sets the update permission status, it allows the update of queue messages and tables. + * + * @param update Permission status to set + * @return the updated AccountSASPermission object */ public AccountSASPermission update(boolean update) { this.update = update; @@ -190,14 +214,19 @@ public AccountSASPermission update(boolean update) { } /** - * Permission to get and delete messages granted. + * Returns the process messages permission, this allows the retrieval and deletion of queue messages. + * + * @return the process messages permission status. 
*/ public boolean processMessages() { return processMessages; } /** - * Permission to get and delete messages granted. + * Sets the process messages permission, this allows the retrieval and deletion of queue messages. + * + * @param processMessages Permission status to set + * @return the updated AccountSASPermission object */ public AccountSASPermission processMessages(boolean processMessages) { this.processMessages = processMessages; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASResourceType.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASResourceType.java index 6fc53c1902937..a91abf9175011 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASResourceType.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASResourceType.java @@ -12,7 +12,7 @@ * {@link AccountSASSignatureValues} object. It is possible to construct the resources string without this class, but * the order of the resources is particular and this class guarantees correctness. */ -final class AccountSASResourceType { +public final class AccountSASResourceType { private boolean service; @@ -23,7 +23,7 @@ final class AccountSASResourceType { /** * Initializes an {@code AccountSASResourceType} object with all fields set to false. */ - private AccountSASResourceType() { + public AccountSASResourceType() { } /** @@ -61,14 +61,17 @@ public static AccountSASResourceType parse(String resourceTypesString) { } /** - * Permission to access service level APIs granted. + * @return the access status for service level APIs. */ public boolean service() { return service; } /** - * Permission to access service level APIs granted. + * Sets the access status for service level APIs. + * + * @param service Access status to set + * @return the updated AccountSASResourceType object. 
*/ public AccountSASResourceType service(boolean service) { this.service = service; @@ -76,14 +79,18 @@ public AccountSASResourceType service(boolean service) { } /** - * Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted. + * @return the access status for container level APIs, this grants access to Blob Containers, Tables, Queues, + * and File Shares. */ public boolean container() { return container; } /** - * Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted. + * Sets the access status for container level APIs, this grants access to Blob Containers, Tables, Queues, and File Shares. + * + * @param container Access status to set + * @return the updated AccountSASResourceType object. */ public AccountSASResourceType container(boolean container) { this.container = container; @@ -91,14 +98,18 @@ public AccountSASResourceType container(boolean container) { } /** - * Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted. + * @return the access status for object level APIs, this grants access to Blobs, Table Entities, Queue Messages, + * Files. */ public boolean object() { return object; } /** - * Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted. + * Sets the access status for object level APIs, this grants access to Blobs, Table Entities, Queue Messages, Files. + * + * @param object Access status to set + * @return the updated AccountSASResourceType object. 
*/ public AccountSASResourceType object(boolean object) { this.object = object; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASService.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASService.java index 0f88ccb2203a9..9fbb9137bcbd6 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASService.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASService.java @@ -12,7 +12,7 @@ * {@link AccountSASSignatureValues} object. It is possible to construct the services string without this class, but * the order of the services is particular and this class guarantees correctness. */ -final class AccountSASService { +public final class AccountSASService { private boolean blob; @@ -25,7 +25,7 @@ final class AccountSASService { /** * Initializes an {@code AccountSASService} object with all fields set to false. */ - private AccountSASService() { + public AccountSASService() { } /** @@ -66,14 +66,17 @@ public static AccountSASService parse(String servicesString) { } /** - * Permission to access blob resources granted. + * @return the access status for blob resources. */ public boolean blob() { return blob; } /** - * Permission to access blob resources granted. + * Sets the access status for blob resources. + * + * @param blob Access status to set + * @return the updated AccountSASService object. */ public AccountSASService blob(boolean blob) { this.blob = blob; @@ -81,14 +84,17 @@ public AccountSASService blob(boolean blob) { } /** - * Permission to access file resources granted. + * @return the access status for file resources. */ public boolean file() { return file; } /** - * Permission to access file resources granted. + * Sets the access status for file resources. + * + * @param file Access status to set + * @return the updated AccountSASService object. 
*/ public AccountSASService file(boolean file) { this.file = file; @@ -96,14 +102,17 @@ public AccountSASService file(boolean file) { } /** - * Permission to access queue resources granted. + * @return the access status for queue resources. */ public boolean queue() { return queue; } /** - * Permission to access queue resources granted. + * Sets the access status for queue resources. + * + * @param queue Access status to set + * @return the updated AccountSASService object. */ public AccountSASService queue(boolean queue) { this.queue = queue; @@ -111,14 +120,17 @@ public AccountSASService queue(boolean queue) { } /** - * Permission to access table resources granted. + * @return the access status for table resources. */ public boolean table() { return table; } /** - * Permission to access table resources granted. + * Sets the access status for table resources. + * + * @param table Access status to set + * @return the updated AccountSASService object. */ public AccountSASService table(boolean table) { this.table = table; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASSignatureValues.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASSignatureValues.java index d60118d87b121..5d770a8112e96 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASSignatureValues.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AccountSASSignatureValues.java @@ -24,9 +24,9 @@ * here for further * descriptions of the parameters, including which are required: * - * @apiNote ## Sample Code \n - * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_sas "Sample code for AccountSASSignatureValues")] \n - * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + *

Please see + * here + * for additional samples.

*/ final class AccountSASSignatureValues { @@ -54,14 +54,19 @@ final class AccountSASSignatureValues { } /** - * If null or empty, this defaults to the service version targeted by this version of the library. + * @return the service version that is targeted, if {@code null} or empty the service version targeted by the + * library will be used. */ public String version() { return version; } /** - * If null or empty, this defaults to the service version targeted by this version of the library. + * Sets the service version that is targeted. Leave this {@code null} or empty to target the version used by the + * library. + * + * @param version Target version to set + * @return the updated AccountSASSignatureValues object. */ public AccountSASSignatureValues version(String version) { this.version = version; @@ -69,14 +74,17 @@ public AccountSASSignatureValues version(String version) { } /** - * {@link SASProtocol} + * @return the {@link SASProtocol} which determines the HTTP protocol that will be used. */ public SASProtocol protocol() { return protocol; } /** - * {@link SASProtocol} + * Sets the {@link SASProtocol} which determines the HTTP protocol that will be used. + * + * @param protocol Protocol to set + * @return the updated AccountSASSignatureValues object. */ public AccountSASSignatureValues protocol(SASProtocol protocol) { this.protocol = protocol; @@ -84,14 +92,17 @@ public AccountSASSignatureValues protocol(SASProtocol protocol) { } /** - * When the SAS will take effect. + * @return when the SAS will take effect. */ public OffsetDateTime startTime() { return startTime; } /** - * When the SAS will take effect. + * Sets when the SAS will take effect. + * + * @param startTime Start time to set + * @return the updated AccountSASSignatureValues object. 
*/ public AccountSASSignatureValues startTime(OffsetDateTime startTime) { this.startTime = startTime; @@ -99,14 +110,17 @@ public AccountSASSignatureValues startTime(OffsetDateTime startTime) { } /** - * The time after which the SAS will no longer work. + * @return the time after which the SAS will no longer work. */ public OffsetDateTime expiryTime() { return expiryTime; } /** - * The time after which the SAS will no longer work. + * Sets the time after which the SAS will no longer work. + * + * @param expiryTime Expiry time to set + * @return the updated AccountSASSignatureValues object. */ public AccountSASSignatureValues expiryTime(OffsetDateTime expiryTime) { this.expiryTime = expiryTime; @@ -114,16 +128,19 @@ public AccountSASSignatureValues expiryTime(OffsetDateTime expiryTime) { } /** - * Specifies which operations the SAS user may perform. Please refer to {@link AccountSASPermission} for help - * constructing the permissions string. + * @return the operations the SAS user may perform. Please refer to {@link AccountSASPermission} to help determine + * which permissions are allowed. */ public String permissions() { return permissions; } /** - * Specifies which operations the SAS user may perform. Please refer to {@link AccountSASPermission} for help - * constructing the permissions string. + * Sets the operations the SAS user may perform. Please refer to {@link AccountSASPermission} for help constructing + * the permissions string. + * + * @param permissions Permissions string to set + * @return the updated AccountSASSignatureValues object. */ public AccountSASSignatureValues permissions(String permissions) { this.permissions = permissions; @@ -131,14 +148,17 @@ public AccountSASSignatureValues permissions(String permissions) { } /** - * {@link IPRange} + * @return the {@link IPRange} which determines the IP ranges that are allowed to use the SAS. 
*/ public IPRange ipRange() { return ipRange; } /** - * {@link IPRange} + * Sets the {@link IPRange} which determines the IP ranges that are allowed to use the SAS. + * + * @param ipRange Allowed IP range to set + * @return the updated AccountSASSignatureValues object. */ public AccountSASSignatureValues ipRange(IPRange ipRange) { this.ipRange = ipRange; @@ -146,16 +166,18 @@ public AccountSASSignatureValues ipRange(IPRange ipRange) { } /** - * The values that indicate the services accessible with this SAS. Please refer to {@link AccountSASService} to - * construct this value. + * @return the services accessible with this SAS. Please refer to {@link AccountSASService} to help determine which + * services are accessible. */ public String services() { return services; } /** - * The values that indicate the services accessible with this SAS. Please refer to {@link AccountSASService} to - * construct this value. + * Sets the services accessible with this SAS. Please refer to {@link AccountSASService} to construct this value. + * + * @param services Allowed services string to set + * @return the updated AccountSASSignatureValues object. */ public AccountSASSignatureValues services(String services) { this.services = services; @@ -163,16 +185,19 @@ public AccountSASSignatureValues services(String services) { } /** - * The values that indicate the resource types accessible with this SAS. Please refer - * to {@link AccountSASResourceType} to construct this value. + * @return the resource types accessible with this SAS. Please refer to {@link AccountSASResourceType} to help + * determine the resource types that are accessible. */ public String resourceTypes() { return resourceTypes; } /** - * The values that indicate the resource types accessible with this SAS. Please refer - * to {@link AccountSASResourceType} to construct this value. + * Sets the resource types accessible with this SAS. Please refer to {@link AccountSASResourceType} to construct + * this value. 
+ * + * @param resourceTypes Allowed resource types string to set + * @return the updated AccountSASSignatureValues object. */ public AccountSASSignatureValues resourceTypes(String resourceTypes) { this.resourceTypes = resourceTypes; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AnonymousCredentialPolicy.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AnonymousCredentialPolicy.java deleted file mode 100644 index d5b1b30df030a..0000000000000 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/AnonymousCredentialPolicy.java +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.storage.blob; - -import com.azure.core.http.HttpPipelineCallContext; -import com.azure.core.http.HttpPipelineNextPolicy; -import com.azure.core.http.HttpResponse; -import com.azure.core.http.policy.HttpPipelinePolicy; -import reactor.core.publisher.Mono; - -/** - * Anonymous credentials are to be used with with HTTP(S) requests that read blobs from public containers or requests - * that use a Shared Access Signature (SAS). This is because Anonymous credentials will not set an Authorization header. - * Pass an instance of this class as the credentials parameter when creating a new pipeline (typically with - * {@link BlobServiceClient}). - */ -public final class AnonymousCredentialPolicy implements HttpPipelinePolicy { - - /** - * Returns an empty instance of {@code AnonymousCredentials}. 
- */ - public AnonymousCredentialPolicy() { - } - - - - @Override - public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { - return next.process(); - } -} diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClientBuilder.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClientBuilder.java index 4467ea5f6a01a..a4a6d8b97f23a 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClientBuilder.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClientBuilder.java @@ -112,8 +112,6 @@ private AzureBlobStorageBuilder buildImpl() { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); - } else { - policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobConfiguration.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobConfiguration.java index ae1e422a24c0a..cc53933f79305 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobConfiguration.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobConfiguration.java @@ -3,6 +3,6 @@ package com.azure.storage.blob; class BlobConfiguration { - static final String NAME = "storage-blob"; - static final String VERSION = "1.0.0-SNAPSHOT"; + static final String NAME = "azure-storage-blob"; + static final String VERSION = "12.0.0-preview.2"; } diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobOutputStream.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobOutputStream.java index c733a580f2500..668f7a2c20663 100644 --- 
a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobOutputStream.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobOutputStream.java @@ -30,7 +30,7 @@ import java.util.concurrent.atomic.AtomicLongFieldUpdater; -public class BlobOutputStream extends OutputStream { +public final class BlobOutputStream extends OutputStream { /** * Holds the {@link BlobAccessConditions} object that represents the access conditions for the blob. */ diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobProperties.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobProperties.java index 321a7c061f387..307ebbc140e40 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobProperties.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobProperties.java @@ -8,7 +8,9 @@ import com.azure.storage.blob.models.BlobType; import com.azure.storage.blob.models.Metadata; -public class BlobProperties { +import java.time.OffsetDateTime; + +public final class BlobProperties { private final BlobType blobType; @@ -26,7 +28,7 @@ public class BlobProperties { private final String cacheControl; - //todo decide datetime representation for last modified time + private final OffsetDateTime lastModified; BlobProperties(BlobGetPropertiesHeaders generatedHeaders) { @@ -38,6 +40,7 @@ public class BlobProperties { this.contentDisposition = generatedHeaders.contentDisposition(); this.contentLanguage = generatedHeaders.contentLanguage(); this.cacheControl = generatedHeaders.cacheControl(); + this.lastModified = generatedHeaders.lastModified(); } @@ -96,4 +99,11 @@ public String contentLanguage() { public String cacheControl() { return cacheControl; } + + /** + * @return the time this blob was last modified + */ + public OffsetDateTime lastModified() { + return lastModified; + } } diff --git 
a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobSASPermission.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobSASPermission.java index 6dd79f092ecd3..24f4b0d0a96c4 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobSASPermission.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobSASPermission.java @@ -12,7 +12,7 @@ * {@link ServiceSASSignatureValues} object. It is possible to construct the permissions string without this class, but * the order of the permissions is particular and this class guarantees correctness. */ -final class BlobSASPermission { +public final class BlobSASPermission { private boolean read; @@ -27,7 +27,7 @@ final class BlobSASPermission { /** * Initializes a {@code BlobSASPermission} object with all fields set to false. */ - private BlobSASPermission() { + public BlobSASPermission() { } /** @@ -70,14 +70,17 @@ public static BlobSASPermission parse(String permString) { } /** - * Specifies Read access granted. + * @return the read permission status. */ public boolean read() { return read; } /** - * Specifies Read access granted. + * Sets the read permission status. + * + * @param read Permission status to set + * @return the updated BlobSASPermission object. */ public BlobSASPermission read(boolean read) { this.read = read; @@ -85,14 +88,17 @@ public BlobSASPermission read(boolean read) { } /** - * Specifies Add access granted. + * @return the add permission status. */ public boolean add() { return add; } /** - * Specifies Add access granted. + * Sets the add permission status. + * + * @param add Permission status to set + * @return the updated BlobSASPermission object. */ public BlobSASPermission add(boolean add) { this.add = add; @@ -100,14 +106,17 @@ public BlobSASPermission add(boolean add) { } /** - * Specifies Create access granted. + * @return the create permission status. 
*/ public boolean create() { return create; } /** - * Specifies Create access granted. + * Sets the create permission status. + * + * @param create Permission status to set + * @return the updated BlobSASPermission object. */ public BlobSASPermission create(boolean create) { this.create = create; @@ -115,14 +124,17 @@ public BlobSASPermission create(boolean create) { } /** - * Specifies Write access granted. + * @return the write permission status. */ public boolean write() { return write; } /** - * Specifies Write access granted. + * Sets the write permission status. + * + * @param write Permission status to set + * @return the updated BlobSASPermission object. */ public BlobSASPermission write(boolean write) { this.write = write; @@ -130,14 +142,17 @@ public BlobSASPermission write(boolean write) { } /** - * Specifies Delete access granted. + * @return the delete permission status. */ public boolean delete() { return delete; } /** - * Specifies Delete access granted. + * Sets the delete permission status. + * + * @param delete Permission status to set + * @return the updated BlobSASPermission object. 
*/ public BlobSASPermission delete(boolean delete) { this.delete = delete; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceClientBuilder.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceClientBuilder.java index 114924273c6cf..461739d29865f 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceClientBuilder.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceClientBuilder.java @@ -96,8 +96,6 @@ private AzureBlobStorageBuilder buildImpl() { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); - } else { - policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java index 67bd29d4d381d..3e06b364d7853 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java @@ -681,7 +681,8 @@ private Flux listBlobsHierarchyHelper(String delimiter, ListBlobsOptio } else { prefixes = Flux.empty(); } - Flux result = blobs.map(item -> item.isPrefix(false)).concatWith(prefixes.map(prefix -> new BlobItem().name(prefix.name()).isPrefix(true))); + Flux result = blobs.map(item -> item.isPrefix(false)) + .concatWith(prefixes.map(prefix -> new BlobItem().name(prefix.name()).isPrefix(true))); if (response.value().nextMarker() != null) { // Recursively add the continuation items to the observable. 
diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerClientBuilder.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerClientBuilder.java index 97a05be2ed254..22310e6215515 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerClientBuilder.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerClientBuilder.java @@ -102,8 +102,6 @@ private AzureBlobStorageBuilder buildImpl() { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); - } else { - policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerProperties.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerProperties.java index 9cada777b6236..9aae7ae17a8cd 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerProperties.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerProperties.java @@ -6,20 +6,23 @@ import com.azure.storage.blob.models.ContainerGetPropertiesHeaders; import com.azure.storage.blob.models.PublicAccessType; -public class ContainerProperties { +import java.time.OffsetDateTime; - private PublicAccessType blobPublicAccess; +public final class ContainerProperties { - private boolean hasImmutabilityPolicy; + private final PublicAccessType blobPublicAccess; - private boolean hasLegalHold; + private final boolean hasImmutabilityPolicy; - //todo decide datetime representation for last modified time + private final boolean hasLegalHold; + + private final OffsetDateTime lastModified; ContainerProperties(ContainerGetPropertiesHeaders generatedResponseHeaders) { 
this.blobPublicAccess = generatedResponseHeaders.blobPublicAccess(); this.hasImmutabilityPolicy = generatedResponseHeaders.hasImmutabilityPolicy(); this.hasLegalHold = generatedResponseHeaders.hasLegalHold(); + this.lastModified = generatedResponseHeaders.lastModified(); } /** @@ -42,4 +45,11 @@ public boolean hasImmutabilityPolicy() { public boolean hasLegalHold() { return hasLegalHold; } + + /** + * @return the time the container was last modified + */ + public OffsetDateTime lastModified() { + return lastModified; + } } diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerSASPermission.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerSASPermission.java index 0ac5e0f6738a3..c6031c9edd374 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerSASPermission.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ContainerSASPermission.java @@ -13,7 +13,7 @@ * {@link ServiceSASSignatureValues} object. It is possible to construct the permissions string without this class, but * the order of the permissions is particular and this class guarantees correctness. */ -final class ContainerSASPermission { +public final class ContainerSASPermission { private boolean read; private boolean add; @@ -29,7 +29,7 @@ final class ContainerSASPermission { /** * Initializes an {@code ContainerSASPermssion} object with all fields set to false. */ - private ContainerSASPermission() { + public ContainerSASPermission() { } /** @@ -75,14 +75,17 @@ public static ContainerSASPermission parse(String permString) { } /** - * Specifies Read access granted. + * @return the read permission status */ public boolean read() { return read; } /** - * Specifies Read access granted. + * Sets the read permission status. 
+ * + * @param read Permission status to set + * @return the updated ContainerSASPermission object */ public ContainerSASPermission read(boolean read) { this.read = read; @@ -90,14 +93,17 @@ public ContainerSASPermission read(boolean read) { } /** - * Specifies Add access granted. + * @return the add permission status */ public boolean add() { return add; } /** - * Specifies Add access granted. + * Sets the add permission status. + * + * @param add Permission status to set + * @return the updated ContainerSASPermission object */ public ContainerSASPermission add(boolean add) { this.add = add; @@ -105,14 +111,17 @@ public ContainerSASPermission add(boolean add) { } /** - * Specifies Create access granted. + * @return the create permission status */ public boolean create() { return create; } /** - * Specifies Create access granted. + * Sets the create permission status. + * + * @param create Permission status to set + * @return the updated ContainerSASPermission object */ public ContainerSASPermission create(boolean create) { this.create = create; @@ -120,14 +129,17 @@ public ContainerSASPermission create(boolean create) { } /** - * Specifies Write access granted. + * @return the write permission status */ public boolean write() { return write; } /** - * Specifies Write access granted. + * Sets the write permission status. + * + * @param write Permission status to set + * @return the updated ContainerSASPermission object */ public ContainerSASPermission write(boolean write) { this.write = write; @@ -135,14 +147,17 @@ public ContainerSASPermission write(boolean write) { } /** - * Specifies Delete access granted. + * @return the delete permission status */ public boolean delete() { return delete; } /** - * Specifies Delete access granted. + * Sets the delete permission status. 
+ * + * @param delete Permission status to set + * @return the updated ContainerSASPermission object */ public ContainerSASPermission delete(boolean delete) { this.delete = delete; @@ -150,14 +165,17 @@ public ContainerSASPermission delete(boolean delete) { } /** - * Specifies List access granted. + * @return the list permission status */ public boolean list() { return list; } /** - * Specifies List access granted. + * Sets the list permission status. + * + * @param list Permission status to set + * @return the updated ContainerSASPermission object */ public ContainerSASPermission list(boolean list) { this.list = list; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/DownloadAsyncResponse.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/DownloadAsyncResponse.java index c27ad3645d207..819b85891b10d 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/DownloadAsyncResponse.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/DownloadAsyncResponse.java @@ -39,9 +39,8 @@ public final class DownloadAsyncResponse { // The constructor is package-private because customers should not be creating their own responses. - // TODO (unknown): resolve comment vs code mismatch DownloadAsyncResponse(ResponseBase> response, - HTTPGetterInfo info, Function> getter) { + HTTPGetterInfo info, Function> getter) { Utility.assertNotNull("getter", getter); Utility.assertNotNull("info", info); Utility.assertNotNull("info.eTag", info.eTag()); @@ -55,10 +54,8 @@ public final class DownloadAsyncResponse { * {@code options.maxRetryRequests > 0}. If retries are enabled, if a connection fails while reading, the stream * will make additional requests to reestablish a connection and continue reading. * - * @param options - * {@link ReliableDownloadOptions} - * - * @return A {@code Flux} which emits the data as {@code ByteBuffer}s. 
+ * @param options {@link ReliableDownloadOptions} + * @return A {@link Flux} which emits the data as {@link ByteBuf ByteBufs} */ public Flux body(ReliableDownloadOptions options) { ReliableDownloadOptions optionsReal = options == null ? new ReliableDownloadOptions() : options; @@ -86,14 +83,11 @@ possible the method call that returns a Single is what throws (like how our apis call time rather than at subscription time. */ try { - // Get a new response and try reading from it. - return getter.apply(this.info) - .flatMapMany(response -> - /* - Do not compound the number of retries by passing in another set of downloadOptions; just get - the raw body. - */ - this.applyReliableDownload(this.rawResponse.value(), retryCount, options)); + /*Get a new response and try reading from it. + Do not compound the number of retries by passing in another set of downloadOptions; just get + the raw body. + */ + return getter.apply(this.info).flatMapMany(response -> this.applyReliableDownload(this.rawResponse.value(), retryCount, options)); } catch (Exception e) { // If the getter fails, return the getter failure to the user. return Flux.error(e); @@ -101,23 +95,20 @@ possible the method call that returns a Single is what throws (like how our apis } } - private Flux applyReliableDownload(Flux data, - int currentRetryCount, ReliableDownloadOptions options) { - return data - .doOnNext(buffer -> { - /* - Update how much data we have received in case we need to retry and propagate to the user the data we - have received. - */ - this.info.offset(this.info.offset() + buffer.readableBytes()); // was `remaining()` in Rx world - if (this.info.count() != null) { - this.info.count(this.info.count() - buffer.readableBytes()); // was `remaining()` in Rx world - } - }) - .onErrorResume(t2 -> { - // Increment the retry count and try again with the new exception. 
- return tryContinueFlux(t2, currentRetryCount + 1, options); - }); + private Flux applyReliableDownload(Flux data, int currentRetryCount, ReliableDownloadOptions options) { + return data.doOnNext(buffer -> { + /* + Update how much data we have received in case we need to retry and propagate to the user the data we + have received. + */ + this.info.offset(this.info.offset() + buffer.readableBytes()); // was `remaining()` in Rx world + if (this.info.count() != null) { + this.info.count(this.info.count() - buffer.readableBytes()); // was `remaining()` in Rx world + } + }).onErrorResume(t2 -> { + // Increment the retry count and try again with the new exception. + return tryContinueFlux(t2, currentRetryCount + 1, options); + }); } /** diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/DownloadResponse.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/DownloadResponse.java index 7acc6427dcf87..11e90ba09daf5 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/DownloadResponse.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/DownloadResponse.java @@ -33,7 +33,6 @@ public void body(OutputStream outputStream, ReliableDownloadOptions options) thr //TODO (unknown): determine signature(s) to use /*public InputStream body(ReliableDownloadOptions options) { return new InputStream() { - DownloadAsyncResponse response = asyncResponse; @Override public int read() throws IOException { diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/HTTPGetterInfo.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/HTTPGetterInfo.java index f868d8061d8df..6393d0a1ebbb5 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/HTTPGetterInfo.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/HTTPGetterInfo.java @@ -11,7 +11,7 @@ * HTTPGetterInfo is a passed to the getter function of 
a reliable download to specify parameters needed for the GET * request. */ -final class HTTPGetterInfo { +public final class HTTPGetterInfo { private long offset = 0; private Long count = null; @@ -19,14 +19,17 @@ final class HTTPGetterInfo { private String eTag = null; /** - * The start offset that should be used when creating the HTTP GET request's Range header. Defaults to 0. + * @return the start offset used when creating the Range header. Defaults to 0. */ public long offset() { return offset; } /** - * The start offset that should be used when creating the HTTP GET request's Range header. Defaults to 0. + * Sets the start offset that is used when creating the Range header. If unchanged this will default to 0. + * + * @param offset Start offset + * @return the updated HTTPGetterInfo object */ public HTTPGetterInfo offset(long offset) { this.offset = offset; @@ -34,16 +37,19 @@ public HTTPGetterInfo offset(long offset) { } /** - * The count of bytes that should be used to calculate the end offset when creating the HTTP GET request's Range - * header. {@code} null is the default and indicates that the entire rest of the blob should be retrieved. + * @return the count of bytes used to calculate the end offset when creating the Range header. {@code} null is the + * default and indicates that the entire rest of the blob should be retrieved. */ public Long count() { return count; } /** - * The count of bytes that should be used to calculate the end offset when creating the HTTP GET request's Range - * header. {@code} null is the default and indicates that the entire rest of the blob should be retrieved. + * Sets the count of bytes used to calculate the end offset when creating the Range header. {@code} null is the + * default and indicates that the entire rest of the blob should be retrieved. 
+ * + * @param count Count of bytes + * @return the updated HTTPGetterInfo object */ public HTTPGetterInfo count(Long count) { if (count != null) { @@ -54,18 +60,21 @@ public HTTPGetterInfo count(Long count) { } /** - * The resource's etag that should be used when creating the HTTP GET request's If-Match header. Note that the - * Etag is returned with any operation that modifies the resource and by a call to {@link - * BlobClient#getProperties(BlobAccessConditions, Duration)}. Defaults to null. + * @return the eTag used when creating If-Match header. eTag is returned with any operation that modifies the + * resource and when retrieving {@link BlobClient#getProperties(BlobAccessConditions, Duration) properties}. + * Defaults to null. */ public String eTag() { return eTag; } /** - * The resource's etag that should be used when creating the HTTP GET request's If-Match header. Note that the - * Etag is returned with any operation that modifies the resource and by a call to {@link - * BlobClient#getProperties(BlobAccessConditions, Duration)}. Defaults to null. + * Sets the eTag used when creating If-Match header. eTag is returned with any operation that modifies the + * resource and when retrieving {@link BlobClient#getProperties(BlobAccessConditions, Duration) properties}. + * Defaults to null. + * + * @param eTag Resource's eTag + * @return the updated HTTPGetterInfo object */ public HTTPGetterInfo eTag(String eTag) { this.eTag = eTag; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/IPRange.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/IPRange.java index 24665c7aaedda..9e66189fa904e 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/IPRange.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/IPRange.java @@ -8,13 +8,16 @@ * set if it is not desired to confine the sas permissions to an IP range. 
Please refer to * {@link AccountSASSignatureValues} or {@link ServiceSASSignatureValues} for more information. */ -final class IPRange { +public final class IPRange { private String ipMin; private String ipMax; - IPRange() { + /** + * Constructs an empty IPRange. + */ + public IPRange() { } /** @@ -36,14 +39,17 @@ public static IPRange parse(String rangeStr) { } /** - * The minimum IP address of the range. + * @return the minimum IP address of the range. */ public String ipMin() { return ipMin; } /** - * The minimum IP address of the range. + * Sets the minimum IP address of the range. + * + * @param ipMin Minimum IP of the range + * @return the updated IPRange object */ public IPRange ipMin(String ipMin) { this.ipMin = ipMin; @@ -51,14 +57,18 @@ public IPRange ipMin(String ipMin) { } /** - * The maximum IP address of the range. + * + * @return the maximum IP address of the range. */ public String ipMax() { return ipMax; } /** - * The maximum IP address of the range. + * Sets the maximum IP address of the range. + * + * @param ipMax Maximum IP of the range + * @return the updated IPRange object */ public IPRange ipMax(String ipMax) { this.ipMax = ipMax; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/IProgressReceiver.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/IProgressReceiver.java index ab336e1862892..70d85fe58090f 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/IProgressReceiver.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/IProgressReceiver.java @@ -3,20 +3,20 @@ package com.azure.storage.blob; +import reactor.core.publisher.Flux; + /** * An {@code IProgressReceiver} is an object that can be used to report progress on network transfers. When specified on * transfer operations, the {@code reportProgress} method will be called periodically with the total number of bytes - * transferred. 
The user may configure this method to report progress in whatever format desired. It is recommended - that this type be used in conjunction with - {@link ProgressReporter#addProgressReporting(reactor.core.publisher.Flux, IProgressReceiver)}. + * transferred. The user may configure this method to report progress in whatever format desired. It is recommended + that this type be used in conjunction with {@link ProgressReporter#addProgressReporting(Flux, IProgressReceiver)}. */ interface IProgressReceiver { /** * The callback function invoked as progress is reported. * - * @param bytesTransferred - * The total number of bytes transferred during this transaction. + * @param bytesTransferred The total number of bytes transferred during this transaction. */ void reportProgress(long bytesTransferred); } diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/PageBlobAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/PageBlobAsyncClient.java index 3f5467a7a6a7b..82e78f762af99 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/PageBlobAsyncClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/PageBlobAsyncClient.java @@ -16,6 +16,7 @@ import com.azure.storage.blob.models.ModifiedAccessConditions; import com.azure.storage.blob.models.PageBlobAccessConditions; import com.azure.storage.blob.models.PageBlobItem; +import com.azure.storage.blob.models.PageList; import com.azure.storage.blob.models.PageRange; import com.azure.storage.blob.models.SequenceNumberActionType; import com.azure.storage.blob.models.SourceModifiedAccessConditions; @@ -336,7 +337,7 @@ public Mono> clearPages(PageRange pageRange, * @return * A reactive response containing the information of the cleared pages.
*/ - public Flux getPageRanges(BlobRange blobRange) { + public Mono> getPageRanges(BlobRange blobRange) { return this.getPageRanges(blobRange, null); } @@ -352,7 +353,7 @@ public Flux getPageRanges(BlobRange blobRange) { * @return * A reactive response emitting all the page ranges. */ - public Flux getPageRanges(BlobRange blobRange, BlobAccessConditions accessConditions) { + public Mono> getPageRanges(BlobRange blobRange, BlobAccessConditions accessConditions) { blobRange = blobRange == null ? new BlobRange(0) : blobRange; accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; @@ -360,7 +361,7 @@ public Flux getPageRanges(BlobRange blobRange, BlobAccessConditions a null, null, snapshot, null, null, blobRange.toHeaderValue(), null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), Context.NONE)) - .flatMapMany(response -> Flux.fromIterable(response.value().pageRange())); + .map(response -> new SimpleResponse<>(response, response.value())); } /** @@ -377,7 +378,7 @@ public Flux getPageRanges(BlobRange blobRange, BlobAccessConditions a * @return * A reactive response emitting all the different page ranges. */ - public Flux getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { + public Mono> getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { return this.getPageRangesDiff(blobRange, prevSnapshot, null); } @@ -397,7 +398,7 @@ public Flux getPageRangesDiff(BlobRange blobRange, String prevSnapsho * @return A reactive response emitting all the different page ranges. * @throws IllegalArgumentException If {@code prevSnapshot} is {@code null} */ - public Flux getPageRangesDiff(BlobRange blobRange, String prevSnapshot, BlobAccessConditions accessConditions) { + public Mono> getPageRangesDiff(BlobRange blobRange, String prevSnapshot, BlobAccessConditions accessConditions) { blobRange = blobRange == null ? new BlobRange(0) : blobRange; accessConditions = accessConditions == null ? 
new BlobAccessConditions() : accessConditions; @@ -409,7 +410,7 @@ public Flux getPageRangesDiff(BlobRange blobRange, String prevSnapsho null, null, snapshot, null, null, prevSnapshot, blobRange.toHeaderValue(), null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), Context.NONE)) - .flatMapMany(response -> Flux.fromIterable(response.value().pageRange())); + .map(response -> new SimpleResponse<>(response, response.value())); } /** diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/PageBlobClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/PageBlobClient.java index fc84f9c9ea211..14fdb39d57022 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/PageBlobClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/PageBlobClient.java @@ -12,6 +12,7 @@ import com.azure.storage.blob.models.ModifiedAccessConditions; import com.azure.storage.blob.models.PageBlobAccessConditions; import com.azure.storage.blob.models.PageBlobItem; +import com.azure.storage.blob.models.PageList; import com.azure.storage.blob.models.PageRange; import com.azure.storage.blob.models.SequenceNumberActionType; import com.azure.storage.blob.models.SourceModifiedAccessConditions; @@ -325,7 +326,7 @@ public Response clearPages(PageRange pageRange, * @return * The information of the cleared pages. */ - public Iterable getPageRanges(BlobRange blobRange) { + public Response getPageRanges(BlobRange blobRange) { return this.getPageRanges(blobRange, null, null); } @@ -343,10 +344,8 @@ public Iterable getPageRanges(BlobRange blobRange) { * @return * All the page ranges. */ - public Iterable getPageRanges(BlobRange blobRange, - BlobAccessConditions accessConditions, Duration timeout) { - Flux response = pageBlobAsyncClient.getPageRanges(blobRange, accessConditions); - return timeout == null ? 
response.toIterable() : response.timeout(timeout).toIterable(); + public Response getPageRanges(BlobRange blobRange, BlobAccessConditions accessConditions, Duration timeout) { + return Utility.blockWithOptionalTimeout(pageBlobAsyncClient.getPageRanges(blobRange, accessConditions), timeout); } /** @@ -363,7 +362,7 @@ public Iterable getPageRanges(BlobRange blobRange, * @return * All the different page ranges. */ - public Iterable getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { + public Response getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { return this.getPageRangesDiff(blobRange, prevSnapshot, null, null); } @@ -385,10 +384,8 @@ public Iterable getPageRangesDiff(BlobRange blobRange, String prevSna * @return * All the different page ranges. */ - public Iterable getPageRangesDiff(BlobRange blobRange, String prevSnapshot, - BlobAccessConditions accessConditions, Duration timeout) { - Flux response = pageBlobAsyncClient.getPageRangesDiff(blobRange, prevSnapshot, accessConditions); - return timeout == null ? response.toIterable() : response.timeout(timeout).toIterable(); + public Response getPageRangesDiff(BlobRange blobRange, String prevSnapshot, BlobAccessConditions accessConditions, Duration timeout) { + return Utility.blockWithOptionalTimeout(pageBlobAsyncClient.getPageRangesDiff(blobRange, prevSnapshot, accessConditions), timeout); } /** diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ProgressReporter.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ProgressReporter.java index fc882d4c30aa4..4ac0ce9cda2f3 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ProgressReporter.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ProgressReporter.java @@ -11,7 +11,7 @@ import java.util.concurrent.locks.Lock; /** - * {@code ProgressReporterImpl} offers a convenient way to add progress tracking to a given Flowable. 
+ * {@code ProgressReporter} offers a convenient way to add progress tracking to a given Flux. */ final class ProgressReporter { @@ -35,29 +35,28 @@ void rewindProgress() { } Flux addProgressReporting(Flux data) { - return Mono.just(this) - .flatMapMany(progressReporter -> { - /* - Each time there is a new subscription, we will rewind the progress. This is desirable specifically - for retries, which resubscribe on each try. The first time this flowable is subscribed to, the - rewind will be a noop as there will have been no progress made. Subsequent rewinds will work as - expected. - */ - progressReporter.rewindProgress(); - /* - Every time we emit some data, report it to the Tracker, which will pass it on to the end user. - */ - return data.doOnNext(buffer -> - progressReporter.reportProgress(buffer.remaining())); - }); + return Mono.just(this).flatMapMany(progressReporter -> { + /* + Each time there is a new subscription, we will rewind the progress. This is desirable specifically + for retries, which resubscribe on each try. The first time this flowable is subscribed to, the + rewind will be a noop as there will have been no progress made. Subsequent rewinds will work as + expected. + */ + progressReporter.rewindProgress(); + + /* + Every time we emit some data, report it to the Tracker, which will pass it on to the end user. + */ + return data.doOnNext(buffer -> progressReporter.reportProgress(buffer.remaining())); + }); } } /** - * This type is used to keep track of the total amount of data transferred for a single request. This is the type - * we will use when the customer uses the factory to add progress reporting to their Flowable. We need this - * additional type because we can't keep local state directly as lambdas require captured local variables to be - * effectively final. + * This type is used to keep track of the total amount of data transferred for a single request. 
This is the type we + * will use when the customer uses the factory to add progress reporting to their Flowable. We need this additional + * type because we can't keep local state directly as lambdas require captured local variables to be effectively + * final. */ private static class SequentialProgressReporter extends ProgressReporterImpl { SequentialProgressReporter(IProgressReceiver progressReceiver) { @@ -129,24 +128,19 @@ public void rewindProgress() { } /** - * Adds progress reporting functionality to the given {@code Flux}. Each subscription (and therefore each - * retry) will rewind the progress reported so as not to over-report. The data reported will be the total amount - * of data emitted so far, or the "current position" of the Flowable. + * Adds progress reporting functionality to the given {@code Flux}. Each subscription (and therefore each retry) + * will rewind the progress reported so as not to over-report. The data reported will be the total amount of data + * emitted so far, or the "current position" of the Flowable. * - * @param data - * The data whose transfer progress is to be tracked. - * @param progressReceiver - * {@link IProgressReceiver} - * - * @return A {@code Flux} that emits the same data as the source but calls a callback to report the total amount - * of data emitted so far. - * - * @apiNote ## Sample Code \n - * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=progress "Sample code for ProgressReporterFactor.addProgressReporting")] \n - * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + * @param data The data whose transfer progress is to be tracked. + * @param progressReceiver {@link IProgressReceiver} + * @return A {@code Flux} that emits the same data as the source but calls a callback to report the total amount of + * data emitted so far. 
+ * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=progress + * "Sample code for ProgressReporterFactor.addProgressReporting")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) */ - public static Flux addProgressReporting(Flux data, - IProgressReceiver progressReceiver) { + public static Flux addProgressReporting(Flux data, IProgressReceiver progressReceiver) { if (progressReceiver == null) { return data; } else { @@ -155,8 +149,7 @@ public static Flux addProgressReporting(Flux data, } } - static Flux addParallelProgressReporting(Flux data, - IProgressReceiver progressReceiver, Lock lock, AtomicLong totalProgress) { + static Flux addParallelProgressReporting(Flux data, IProgressReceiver progressReceiver, Lock lock, AtomicLong totalProgress) { if (progressReceiver == null) { return data; } else { diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/SASProtocol.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/SASProtocol.java index 64df03714ecb5..475e6f4289f6f 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/SASProtocol.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/SASProtocol.java @@ -9,7 +9,7 @@ * Specifies the set of possible permissions for a shared access signature protocol. Values of this type can be used * to set the fields on the {@link AccountSASSignatureValues} and {@link ServiceSASSignatureValues} types. */ -enum SASProtocol { +public enum SASProtocol { /** * Permission to use SAS only through https granted. 
*/ diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/SASQueryParameters.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/SASQueryParameters.java index 399e7ea1b6e63..7dead915f796d 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/SASQueryParameters.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/SASQueryParameters.java @@ -14,10 +14,10 @@ /** * Represents the components that make up an Azure Storage SAS' query parameters. This type is not constructed directly * by the user; it is only generated by the {@link AccountSASSignatureValues} and {@link ServiceSASSignatureValues} - * types. Once generated, it can be set on a {@link BlobURLParts} object to be constructed as part of a URL or it can - * be encoded into a {@code String} and appended to a URL directly (though caution should be taken here in case there - * are existing query parameters, which might affect the appropriate means of appending these query parameters). - * NOTE: Instances of this class are immutable to ensure thread safety. + * types. Once generated, it can be set on a {@link BlobURLParts} object to be constructed as part of a URL or it can be + * encoded into a {@code String} and appended to a URL directly (though caution should be taken here in case there are + * existing query parameters, which might affect the appropriate means of appending these query parameters). NOTE: + * Instances of this class are immutable to ensure thread safety. */ public final class SASQueryParameters { @@ -69,9 +69,10 @@ public final class SASQueryParameters { * Creates a new {@link SASQueryParameters} object. 
* * @param queryParamsMap All query parameters for the request as key-value pairs - * @param removeSASParametersFromMap When {@code true}, the SAS query parameters will be removed from queryParamsMap + * @param removeSASParametersFromMap When {@code true}, the SAS query parameters will be removed from + * queryParamsMap */ - SASQueryParameters(Map queryParamsMap, boolean removeSASParametersFromMap) { + public SASQueryParameters(Map queryParamsMap, boolean removeSASParametersFromMap) { this.version = getQueryParameter(queryParamsMap, Constants.UrlConstants.SAS_SERVICE_VERSION, removeSASParametersFromMap); this.services = getQueryParameter(queryParamsMap, Constants.UrlConstants.SAS_SERVICES, removeSASParametersFromMap); this.resourceTypes = getQueryParameter(queryParamsMap, Constants.UrlConstants.SAS_RESOURCES_TYPES, removeSASParametersFromMap); @@ -115,36 +116,27 @@ private T getQueryParameter(Map parameters, String name, B /** - * Creates a new {@link SASQueryParameters} object. These objects are only created internally by - * *SASSignatureValues classes. + * Creates a new {@link SASQueryParameters} object. These objects are only created internally by SASSignatureValues + * classes. * - * @param version - * A {@code String} representing the storage version. - * @param services - * A {@code String} representing the storage services being accessed (only for Account SAS). - * @param resourceTypes - * A {@code String} representing the storage resource types being accessed (only for Account SAS). - * @param protocol - * A {@code String} representing the allowed HTTP protocol(s) or {@code null}. - * @param startTime - * A {@code java.util.Date} representing the start time for this SAS token or {@code null}. - * @param expiryTime - * A {@code java.util.Date} representing the expiry time for this SAS token. - * @param ipRange - * A {@link IPRange} representing the range of valid IP addresses for this SAS token or {@code null}. 
- * @param identifier - * A {@code String} representing the signed identifier (only for Service SAS) or {@code null}. - * @param resource - * A {@code String} representing the storage container or blob (only for Service SAS). - * @param permissions - * A {@code String} representing the storage permissions or {@code null}. - * @param signature - * A {@code String} representing the signature for the SAS token. + * @param version A {@code String} representing the storage version. + * @param services A {@code String} representing the storage services being accessed (only for Account SAS). + * @param resourceTypes A {@code String} representing the storage resource types being accessed (only for Account + * SAS). + * @param protocol A {@code String} representing the allowed HTTP protocol(s) or {@code null}. + * @param startTime A {@code java.util.Date} representing the start time for this SAS token or {@code null}. + * @param expiryTime A {@code java.util.Date} representing the expiry time for this SAS token. + * @param ipRange A {@link IPRange} representing the range of valid IP addresses for this SAS token or {@code + * null}. + * @param identifier A {@code String} representing the signed identifier (only for Service SAS) or {@code null}. + * @param resource A {@code String} representing the storage container or blob (only for Service SAS). + * @param permissions A {@code String} representing the storage permissions or {@code null}. + * @param signature A {@code String} representing the signature for the SAS token. 
*/ SASQueryParameters(String version, String services, String resourceTypes, SASProtocol protocol, - OffsetDateTime startTime, OffsetDateTime expiryTime, IPRange ipRange, String identifier, - String resource, String permissions, String signature, String cacheControl, String contentDisposition, - String contentEncoding, String contentLanguage, String contentType, UserDelegationKey key) { + OffsetDateTime startTime, OffsetDateTime expiryTime, IPRange ipRange, String identifier, + String resource, String permissions, String signature, String cacheControl, String contentDisposition, + String contentEncoding, String contentLanguage, String contentType, UserDelegationKey key) { this.version = version; this.services = services; @@ -196,8 +188,8 @@ public String services() { } /** - * @return The storage resource types being accessed (only for Account SAS). Please refer to - * {@link AccountSASResourceType} for more details. + * @return The storage resource types being accessed (only for Account SAS). Please refer to {@link + * AccountSASResourceType} for more details. */ public String resourceTypes() { return resourceTypes; @@ -248,8 +240,8 @@ public String resource() { } /** - * @return Please refer to {@link AccountSASPermission}, {@link BlobSASPermission}, or {@link ContainerSASPermission} - * for more details. + * @return Please refer to {@link AccountSASPermission}, {@link BlobSASPermission}, or {@link + * ContainerSASPermission} for more details. 
*/ public String permissions() { return permissions; @@ -341,12 +333,12 @@ public String keyVersion() { UserDelegationKey userDelegationKey() { return new UserDelegationKey() - .signedExpiry(this.keyExpiry) - .signedOid(this.keyOid) - .signedService(this.keyService) - .signedStart(this.keyStart) - .signedTid(this.keyTid) - .signedVersion(this.keyVersion); + .signedExpiry(this.keyExpiry) + .signedOid(this.keyOid) + .signedService(this.keyService) + .signedStart(this.keyStart) + .signedTid(this.keyTid) + .signedVersion(this.keyVersion); } private void tryAppendQueryParameter(StringBuilder sb, String param, Object value) { diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java index f6b4767cada56..9fd8eaa2fc64a 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java @@ -24,9 +24,9 @@ * Please see here for * more details on each value, including which are required. * - * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_sas - * "Sample code for ServiceSASSignatureValues")] \n For more samples, please see the [Samples - * file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + *

Please see + * here + * for additional samples.

 */ final class ServiceSASSignatureValues { @@ -69,8 +69,8 @@ final class ServiceSASSignatureValues { /** * Creates an object with the specified expiry time and permissions * - * @param expiryTime - * @param permissions + * @param expiryTime Time at which the SAS expires and stops being valid + * @param permissions Permissions granted by the SAS */ ServiceSASSignatureValues(OffsetDateTime expiryTime, String permissions) { this.expiryTime = expiryTime; @@ -80,7 +80,7 @@ final class ServiceSASSignatureValues { /** * Creates an object with the specified identifier * - * @param identifier + * @param identifier Identifier for the SAS */ ServiceSASSignatureValues(String identifier) { this.identifier = identifier; @@ -106,16 +106,19 @@ final class ServiceSASSignatureValues { } /** - * The version of the service this SAS will target. If not specified, it will default to the version targeted by the - * library. + * @return the version of the service this SAS will target. If not specified, it will default to the version targeted + * by the library. */ public String version() { return version; } /** - * The version of the service this SAS will target. If not specified, it will default to the version targeted by the - * library. + * Sets the version of the service this SAS will target. If not specified, it will default to the version targeted + * by the library. + * + * @param version Version to target + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues version(String version) { this.version = version; @@ -123,14 +126,17 @@ public ServiceSASSignatureValues version(String version) { } /** - * {@link SASProtocol} + * @return the {@link SASProtocol} which determines the protocols allowed by the SAS. */ public SASProtocol protocol() { return protocol; } /** - * {@link SASProtocol} + * Sets the {@link SASProtocol} which determines the protocols allowed by the SAS. 
+ * + * @param protocol Protocol for the SAS + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues protocol(SASProtocol protocol) { this.protocol = protocol; @@ -138,14 +144,17 @@ public ServiceSASSignatureValues protocol(SASProtocol protocol) { } /** - * When the SAS will take effect. + * @return when the SAS will take effect. */ public OffsetDateTime startTime() { return startTime; } /** - * When the SAS will take effect. + * Sets when the SAS will take effect. + * + * @param startTime When the SAS takes effect + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues startTime(OffsetDateTime startTime) { this.startTime = startTime; @@ -153,14 +162,17 @@ public ServiceSASSignatureValues startTime(OffsetDateTime startTime) { } /** - * The time after which the SAS will no longer work. + * @return the time after which the SAS will no longer work. */ public OffsetDateTime expiryTime() { return expiryTime; } /** - * The time after which the SAS will no longer work. + * Sets the time after which the SAS will no longer work. + * + * @param expiryTime When the SAS will no longer work + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues expiryTime(OffsetDateTime expiryTime) { this.expiryTime = expiryTime; @@ -168,16 +180,19 @@ public ServiceSASSignatureValues expiryTime(OffsetDateTime expiryTime) { } /** - * Please refer to either {@link ContainerSASPermission} or {@link BlobSASPermission} depending on the resource - * being accessed for help constructing the permissions string. + * @return the permissions string allowed by the SAS. Please refer to either {@link ContainerSASPermission} or + * {@link BlobSASPermission} depending on the resource being accessed for help determining the permissions allowed. 
*/ public String permissions() { return permissions; } /** - * Please refer to either {@link ContainerSASPermission} or {@link BlobSASPermission} depending on the resource - * being accessed for help constructing the permissions string. + * Sets the permissions string allowed by the SAS. Please refer to either {@link ContainerSASPermission} or + * {@link BlobSASPermission} depending on the resource being accessed for help constructing the permissions string. + * + * @param permissions Permissions string for the SAS + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues permissions(String permissions) { this.permissions = permissions; @@ -185,14 +200,17 @@ public ServiceSASSignatureValues permissions(String permissions) { } /** - * {@link IPRange} + * @return the {@link IPRange} which determines the IP ranges that are allowed to use the SAS. */ public IPRange ipRange() { return ipRange; } /** - * {@link IPRange} + * Sets the {@link IPRange} which determines the IP ranges that are allowed to use the SAS. + * + * @param ipRange Allowed IP range to set + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues ipRange(IPRange ipRange) { this.ipRange = ipRange; @@ -200,14 +218,17 @@ public ServiceSASSignatureValues ipRange(IPRange ipRange) { } /** - * The resource the SAS user may access. + * @return the resource the SAS user may access. */ public String resource() { return resource; } /** - * The resource the SAS user may access. + * Sets the resource the SAS user may access. + * + * @param resource Allowed resources string to set + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues resource(String resource) { this.resource = resource; @@ -215,14 +236,17 @@ public ServiceSASSignatureValues resource(String resource) { } /** - * The canonical name of the object the SAS user may access. + * @return the canonical name of the object the SAS user may access. 
*/ public String canonicalName() { return canonicalName; } /** - * The canonical name of the object the SAS user may access. + * Sets the canonical name of the object the SAS user may access. + * + * @param canonicalName Canonical name of the object the SAS grants access + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues canonicalName(String canonicalName) { this.canonicalName = canonicalName; @@ -230,34 +254,38 @@ public ServiceSASSignatureValues canonicalName(String canonicalName) { } /** - * The canonical name of the object the SAS user may access. + * Sets the canonical name of the object the SAS user may access. Constructs a canonical name of + * "/blob/{accountName}{Path of urlString}". * - * @throws RuntimeException If urlString is a malformed URL. + * @param urlString URL string that contains the path to the object + * @param accountName Name of the account that contains the object + * @return the updated ServiceSASSignatureValues object + * @throws RuntimeException If {@code urlString} is a malformed URL. */ public ServiceSASSignatureValues canonicalName(String urlString, String accountName) { - URL url = null; + URL url; try { url = new URL(urlString); } catch (MalformedURLException e) { throw new RuntimeException(e); } - StringBuilder canonicalName = new StringBuilder("/blob"); - canonicalName.append('/').append(accountName).append(url.getPath()); - this.canonicalName = canonicalName.toString(); - + this.canonicalName = String.format("/blob/%s%s", accountName, url.getPath()); return this; } /** - * The specific snapshot the SAS user may access. + * @return the specific snapshot the SAS user may access. */ public String snapshotId() { return this.snapshotId; } /** - * The specific snapshot the SAS user may access. + * Sets the specific snapshot the SAS user may access. 
+ * + * @param snapshotId Identifier of the snapshot + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues snapshotId(String snapshotId) { this.snapshotId = snapshotId; @@ -265,7 +293,7 @@ public ServiceSASSignatureValues snapshotId(String snapshotId) { } /** - * The name of the access policy on the container this SAS references if any. Please see + * @return the name of the access policy on the container this SAS references if any. Please see * here * for more information. */ @@ -274,9 +302,12 @@ public String identifier() { } /** - * The name of the access policy on the container this SAS references if any. Please see + * Sets the name of the access policy on the container this SAS references if any. Please see * here * for more information. + * + * @param identifier Name of the access policy + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues identifier(String identifier) { this.identifier = identifier; @@ -284,14 +315,17 @@ public ServiceSASSignatureValues identifier(String identifier) { } /** - * The cache-control header for the SAS. + * @return the cache-control header for the SAS. */ public String cacheControl() { return cacheControl; } /** - * The cache-control header for the SAS. + * Sets the cache-control header for the SAS. + * + * @param cacheControl Cache-Control header value + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues cacheControl(String cacheControl) { this.cacheControl = cacheControl; @@ -299,14 +333,17 @@ public ServiceSASSignatureValues cacheControl(String cacheControl) { } /** - * The content-disposition header for the SAS. + * @return the content-disposition header for the SAS. */ public String contentDisposition() { return contentDisposition; } /** - * The content-disposition header for the SAS. + * Sets the content-disposition header for the SAS. 
+ * + * @param contentDisposition Content-Disposition header value + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues contentDisposition(String contentDisposition) { this.contentDisposition = contentDisposition; @@ -314,14 +351,17 @@ public ServiceSASSignatureValues contentDisposition(String contentDisposition) { } /** - * The content-encoding header for the SAS. + * @return the content-encoding header for the SAS. */ public String contentEncoding() { return contentEncoding; } /** - * The content-encoding header for the SAS. + * Sets the content-encoding header for the SAS. + * + * @param contentEncoding Content-Encoding header value + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues contentEncoding(String contentEncoding) { this.contentEncoding = contentEncoding; @@ -329,14 +369,17 @@ public ServiceSASSignatureValues contentEncoding(String contentEncoding) { } /** - * The content-language header for the SAS. + * @return the content-language header for the SAS. */ public String contentLanguage() { return contentLanguage; } /** - * The content-language header for the SAS. + * Sets the content-language header for the SAS. + * + * @param contentLanguage Content-Language header value + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues contentLanguage(String contentLanguage) { this.contentLanguage = contentLanguage; @@ -344,14 +387,17 @@ public ServiceSASSignatureValues contentLanguage(String contentLanguage) { } /** - * The content-type header for the SAS. + * @return the content-type header for the SAS. */ public String contentType() { return contentType; } /** - * The content-type header for the SAS. + * Sets the content-type header for the SAS. 
+ * + * @param contentType Content-Type header value + * @return the updated ServiceSASSignatureValues object */ public ServiceSASSignatureValues contentType(String contentType) { this.contentType = contentType; @@ -373,7 +419,7 @@ public SASQueryParameters generateSASQueryParameters(SharedKeyCredential sharedK // Signature is generated on the un-url-encoded values. final String stringToSign = stringToSign(); - String signature = null; + String signature; try { signature = sharedKeyCredentials.computeHmac256(stringToSign); } catch (InvalidKeyException e) { @@ -400,7 +446,7 @@ public SASQueryParameters generateSASQueryParameters(UserDelegationKey delegatio // Signature is generated on the un-url-encoded values. final String stringToSign = stringToSign(delegationKey); - String signature = null; + String signature; try { signature = Utility.delegateComputeHmac256(delegationKey, stringToSign); } catch (InvalidKeyException e) { @@ -420,21 +466,17 @@ private void assertGenerateOK(boolean usingUserDelegation) { Utility.assertNotNull("version", this.version); Utility.assertNotNull("canonicalName", this.canonicalName); - // Ensure either (expiryTime and permissions) or (identifier) is set - if (this.expiryTime == null || this.permissions == null) { - // Identifier is not required if user delegation is being used - if (!usingUserDelegation) { - Utility.assertNotNull("identifier", this.identifier); - } - } else { + // If a UserDelegation key or a SignedIdentifier is not being used both expiryDate and permissions must be set. + if (usingUserDelegation || identifier == null) { Utility.assertNotNull("expiryTime", this.expiryTime); Utility.assertNotNull("permissions", this.permissions); + } else if (!usingUserDelegation) { + // Otherwise a SignedIdentifier must be used. 
+ Utility.assertNotNull("identifier", this.identifier); } - if (this.resource != null && this.resource.equals(Constants.UrlConstants.SAS_CONTAINER_CONSTANT)) { - if (this.snapshotId != null) { - throw new IllegalArgumentException("Cannot set a snapshotId without resource being a blob."); - } + if (Constants.UrlConstants.SAS_CONTAINER_CONSTANT.equals(this.resource) && this.snapshotId != null) { + throw new IllegalArgumentException("Cannot set a snapshotId without resource being a blob."); } } diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/UnexpectedLengthException.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/UnexpectedLengthException.java new file mode 100644 index 0000000000000..3fa8b292768d3 --- /dev/null +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/UnexpectedLengthException.java @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * This exception class represents an error when the specified input length doesn't match the data length. 
+ */ +public final class UnexpectedLengthException extends IllegalStateException { + private final long bytesRead; + private final long bytesExpected; + + UnexpectedLengthException(String message, long bytesRead, long bytesExpected) { + super(message); + this.bytesRead = bytesRead; + this.bytesExpected = bytesExpected; + } + + /** + * @return the number of bytes read from the input + */ + public long bytesRead() { + return this.bytesRead; + } + + /** + * @return the number of bytes that were expected to be read from the input + */ + public long bytesExpected() { + return this.bytesExpected; + } +} diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobHierarchyListSegment.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobHierarchyListSegment.java index d90e89ab602f3..c1885f617481d 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobHierarchyListSegment.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobHierarchyListSegment.java @@ -6,7 +6,9 @@ import com.azure.core.implementation.annotation.Fluent; import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + import java.util.ArrayList; import java.util.List; @@ -14,6 +16,7 @@ * The BlobHierarchyListSegment model. 
 */ @JacksonXmlRootElement(localName = "Blobs") +@JsonDeserialize(using = CustomHierarchicalListingDeserializer.class) @Fluent public final class BlobHierarchyListSegment { /* diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobRange.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobRange.java index 183ebbf5664ad..6dd489b399813 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobRange.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobRange.java @@ -40,11 +40,10 @@ public BlobRange(long offset) { * @param count * the number of bytes to download */ - public BlobRange(long offset, long count) { + public BlobRange(long offset, Long count) { this(offset); - if (count < 0) { - throw new IllegalArgumentException( - "BlobRange count must be greater than or equal to 0 if specified."); + if (count != null && count < 0) { + throw new IllegalArgumentException("BlobRange count must be greater than or equal to 0 if specified."); } this.count = count; } diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/common/policy/RequestRetryOptions.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/common/policy/RequestRetryOptions.java index 80b157bff4716..ae0519ee6a03c 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/common/policy/RequestRetryOptions.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/common/policy/RequestRetryOptions.java @@ -6,9 +6,9 @@ import java.util.concurrent.TimeUnit; /** - * Options for configuring the {@link RequestRetryPolicy}. Please refer to the Factory for more information. Note - * that there is no option for overall operation timeout. This is because Rx object have a timeout field which provides - * this functionality. + * Options for configuring the {@link RequestRetryPolicy}. 
Please refer to the Factory for more information. Note that + * there is no option for overall operation timeout. This is because Rx object have a timeout field which provides this + * functionality. */ public final class RequestRetryOptions { @@ -16,11 +16,8 @@ public final class RequestRetryOptions { private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; - /** - * A {@link RetryPolicyType} telling the pipeline what kind of retry policy to use. - */ - private RetryPolicyType retryPolicyType; - private String secondaryHost; + private final RetryPolicyType retryPolicyType; + private final String secondaryHost; /** * Constructor with default retry values: Exponential backoff, maxTries=4, tryTimeout=30, retryDelayInMs=4000, @@ -28,47 +25,43 @@ public final class RequestRetryOptions { */ public RequestRetryOptions() { this(RetryPolicyType.EXPONENTIAL, null, - null, null, null, null); + null, null, null, null); } /** * Configures how the {@link com.azure.core.http.HttpPipeline} should retry requests. * - * @param retryPolicyType - * A {@link RetryPolicyType} specifying the type of retry pattern to use. A value of {@code null} accepts - * the default. - * @param maxTries - * Specifies the maximum number of attempts an operation will be tried before producing an error. A value of - * {@code null} means that you accept our default policy. A value of 1 means 1 try and no retries. - * @param tryTimeout - * Indicates the maximum time allowed for any single try of an HTTP request. A value of {@code null} means - * that you accept our default. NOTE: When transferring large amounts of data, the default TryTimeout will - * probably not be sufficient. You should override this value based on the bandwidth available to the host - * machine and proximity to the Storage service. A good starting point may be something like (60 seconds per - * MB of anticipated-payload-size). 
- * @param retryDelayInMs - * Specifies the amount of delay to use before retrying an operation. A value of {@code null} means you - * accept the default value. The delay increases (exponentially or linearly) with each retry up to a maximum - * specified by MaxRetryDelay. If you specify {@code null}, then you must also specify {@code null} for - * MaxRetryDelay. - * @param maxRetryDelayInMs - * Specifies the maximum delay allowed before retrying an operation. A value of {@code null} means you - * accept the default value. If you specify {@code null}, then you must also specify {@code null} for - * RetryDelay. - * @param secondaryHost - * If a secondaryHost is specified, retries will be tried against this host. If secondaryHost is - * {@code null} (the default) then operations are not retried against another host. NOTE: Before setting - * this field, make sure you understand the issues around reading stale and potentially-inconsistent data at - * this webpage - * @throws IllegalArgumentException If {@code retryDelayInMs} and {@code maxRetryDelayInMs} are not both null or non-null - * or {@code retryPolicyType} isn't {@link RetryPolicyType#EXPONENTIAL} or {@link RetryPolicyType#FIXED}. + * @param retryPolicyType A {@link RetryPolicyType} specifying the type of retry pattern to use. A value of {@code + * null} accepts the default. + * @param maxTries Specifies the maximum number of attempts an operation will be tried before producing an error. A + * value of {@code null} means that you accept our default policy. A value of 1 means 1 try and no retries. + * @param tryTimeout Indicates the maximum time allowed for any single try of an HTTP request. A value of {@code + * null} means that you accept our default. NOTE: When transferring large amounts of data, the default TryTimeout + * will probably not be sufficient. You should override this value based on the bandwidth available to the host + * machine and proximity to the Storage service. 
A good starting point may be something like (60 seconds per MB of + * anticipated-payload-size). + * @param retryDelayInMs Specifies the amount of delay to use before retrying an operation. A value of {@code null} + * means you accept the default value. The delay increases (exponentially or linearly) with each retry up to a + * maximum specified by MaxRetryDelay. If you specify {@code null}, then you must also specify {@code null} for + * MaxRetryDelay. + * @param maxRetryDelayInMs Specifies the maximum delay allowed before retrying an operation. A value of {@code + * null} means you accept the default value. If you specify {@code null}, then you must also specify {@code null} + * for RetryDelay. + * @param secondaryHost If a secondaryHost is specified, retries will be tried against this host. If secondaryHost + * is {@code null} (the default) then operations are not retried against another host. NOTE: Before setting this + * field, make sure you understand the issues around reading stale and potentially-inconsistent data at + * this + * webpage + * @throws IllegalArgumentException If {@code retryDelayInMs} and {@code maxRetryDelayInMs} are not both null or + * non-null or {@code retryPolicyType} isn't {@link RetryPolicyType#EXPONENTIAL} or {@link RetryPolicyType#FIXED}. * *

Sample Code

* - *

For more samples, please see the samples file

+ *

For more samples, please see the samples + * file

*/ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, - Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { + Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { this.retryPolicyType = retryPolicyType == null ? RetryPolicyType.EXPONENTIAL : retryPolicyType; if (maxTries != null) { assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); @@ -85,7 +78,7 @@ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, In } if ((retryDelayInMs == null && maxRetryDelayInMs != null) - || (retryDelayInMs != null && maxRetryDelayInMs == null)) { + || (retryDelayInMs != null && maxRetryDelayInMs == null)) { throw new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null"); } @@ -111,36 +104,49 @@ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, In this.secondaryHost = secondaryHost; } - int maxTries() { + /** + * @return the maximum number attempts that will be retried before the operation finally fails. + */ + public int maxTries() { return this.maxTries; } - int tryTimeout() { + /** + * @return the timeout in seconds allowed for each retry operation. + */ + public int tryTimeout() { return this.tryTimeout; } - String secondaryHost() { + /** + * @return the secondary host that retries could be attempted against. + */ + public String secondaryHost() { return this.secondaryHost; } - long retryDelayInMs() { + /** + * @return the delay in milliseconds between retry attempts. + */ + public long retryDelayInMs() { return retryDelayInMs; } - long maxRetryDelayInMs() { + /** + * @return the maximum delay in milliseconds between retry attempts. + */ + public long maxRetryDelayInMs() { return maxRetryDelayInMs; } /** * Calculates how long to delay before sending the next request. * - * @param tryCount - * An {@code int} indicating which try we are on. 
- * + * @param tryCount An {@code int} indicating which try we are on. * @return A {@code long} value of how many milliseconds to delay. */ long calculateDelayInMs(int tryCount) { - long delay = 0; + long delay; switch (this.retryPolicyType) { case EXPONENTIAL: delay = (pow(2L, tryCount - 1) - 1L) * this.retryDelayInMs; diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/common/policy/RequestRetryPolicy.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/common/policy/RequestRetryPolicy.java index 774a44999ce76..83bef0b2b49fd 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/common/policy/RequestRetryPolicy.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/common/policy/RequestRetryPolicy.java @@ -22,10 +22,10 @@ import java.util.concurrent.TimeoutException; /** - * This is a request policy in an {@link com.azure.core.http.HttpPipeline} for retrying a given HTTP request. The request - * that is retried will be identical each time it is reissued. Retries will try against a secondary if one is specified - * and the type of operation/error indicates that the secondary can handle the request. Exponential and fixed backoff are - * supported. The policy must only be used directly when creating a custom pipeline. + * This is a request policy in an {@link com.azure.core.http.HttpPipeline} for retrying a given HTTP request. The + * request that is retried will be identical each time it is reissued. Retries will try against a secondary if one is + * specified and the type of operation/error indicates that the secondary can handle the request. Exponential and fixed + * backoff are supported. The policy must only be used directly when creating a custom pipeline. 
*/ public final class RequestRetryPolicy implements HttpPipelinePolicy { private final RequestRetryOptions requestRetryOptions; @@ -41,40 +41,32 @@ public RequestRetryPolicy(RequestRetryOptions requestRetryOptions) { @Override public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { - HttpRequest httpRequest = context.httpRequest(); - boolean considerSecondary = (httpRequest.httpMethod().equals(HttpMethod.GET) - || httpRequest.httpMethod().equals(HttpMethod.HEAD)) - && (this.requestRetryOptions.secondaryHost() != null); + boolean considerSecondary = (this.requestRetryOptions.secondaryHost() != null) + && (HttpMethod.GET.equals(context.httpRequest().httpMethod()) || HttpMethod.HEAD.equals(context.httpRequest().httpMethod())); - return this.attemptAsync(httpRequest, next, 1, considerSecondary, 1); + return attemptAsync(context, next, context.httpRequest(), considerSecondary, 1, 1); } /** - * This method actually attempts to send the request and determines if we should attempt again and, if so, how - * long to wait before sending out the next request. + * This method actually attempts to send the request and determines if we should attempt again and, if so, how long + * to wait before sending out the next request. *

- * Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure - * or an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against - * primary; even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, - * 1.2) If secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying - * against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)) + * Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure or + * an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against primary; + * even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) If + * secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying against a + * secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)) * - * @param httpRequest - * The request to try. - * @param primaryTry - * This indicates how man tries we've attempted against the primary DC. - * @param considerSecondary - * Before each try, we'll select either the primary or secondary URL if appropriate. - * @param attempt - * This indicates the total number of attempts to send the request. - * - * @return A single containing either the successful response or an error that was not retryable because either - * the maxTries was exceeded or retries will not mitigate the issue. + * @param context The request to try. + * @param next The next policy to apply to the request + * @param originalRequest The unmodified original request + * @param primaryTry This indicates how man tries we've attempted against the primary DC. + * @param attempt This indicates the total number of attempts to send the request. 
+ * @return A single containing either the successful response or an error that was not retryable because either the + * maxTries was exceeded or retries will not mitigate the issue. */ - private Mono attemptAsync(final HttpRequest httpRequest, HttpPipelineNextPolicy next, final int primaryTry, - final boolean considerSecondary, - final int attempt) { - + private Mono attemptAsync(final HttpPipelineCallContext context, HttpPipelineNextPolicy next, final HttpRequest originalRequest, + boolean considerSecondary, final int primaryTry, final int attempt) { // Determine which endpoint to try. It's primary if there is no secondary or if it is an odd number attempt. final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0); @@ -96,14 +88,14 @@ stream, the buffers that were emitted will have already been consumed (their pos ByteBuffers downstream will only actually consume a duplicate so the original is preserved. This only duplicates the ByteBuffer object, not the underlying data. */ - Flux bufferedBody = httpRequest.body() == null - ? null : httpRequest.body().map(ByteBuf::duplicate); - httpRequest.body(bufferedBody); + context.httpRequest(originalRequest.buffer()); + Flux bufferedBody = (context.httpRequest().body() == null) ? null : context.httpRequest().body().map(ByteBuf::duplicate); + context.httpRequest().body(bufferedBody); if (!tryingPrimary) { - UrlBuilder builder = UrlBuilder.parse(httpRequest.url()); + UrlBuilder builder = UrlBuilder.parse(context.httpRequest().url()); builder.host(this.requestRetryOptions.secondaryHost()); try { - httpRequest.url(builder.toURL()); + context.httpRequest().url(builder.toURL()); } catch (MalformedURLException e) { return Mono.error(e); } @@ -114,55 +106,53 @@ stream, the buffers that were emitted will have already been consumed (their pos until after the retry backoff delay, so we call delaySubscription. 
*/ return next.clone().process() - .timeout(Duration.ofSeconds(this.requestRetryOptions.tryTimeout())) - .delaySubscription(Duration.ofMillis(delayMs)) - .flatMap(response -> { - boolean newConsiderSecondary = considerSecondary; - String action; - int statusCode = response.statusCode(); + .timeout(Duration.ofSeconds(this.requestRetryOptions.tryTimeout())) + .delaySubscription(Duration.ofMillis(delayMs)) + .flatMap(response -> { + boolean newConsiderSecondary = considerSecondary; + String action; + int statusCode = response.statusCode(); /* If attempt was against the secondary & it returned a StatusNotFound (404), then the resource was not found. This may be due to replication delay. So, in this case, we'll never try the secondary again for this operation. */ - if (!tryingPrimary && statusCode == 404) { - newConsiderSecondary = false; - action = "Retry: Secondary URL returned 404"; - } else if (statusCode == 503 || statusCode == 500) { - action = "Retry: Temporary error or server timeout"; - } else { - action = "NoRetry: Successful HTTP request"; - } - - if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) { + if (!tryingPrimary && statusCode == 404) { + newConsiderSecondary = false; + action = "Retry: Secondary URL returned 404"; + } else if (statusCode == 503 || statusCode == 500) { + action = "Retry: Temporary error or server timeout"; + } else { + action = "NoRetry: Successful HTTP request"; + } + + if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) { /* We increment primaryTry if we are about to try the primary again (which is when we consider the secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to calculate the delay. */ - int newPrimaryTry = !tryingPrimary || !considerSecondary - ? 
primaryTry + 1 : primaryTry; - return attemptAsync(httpRequest, next, newPrimaryTry, newConsiderSecondary, - attempt + 1); - } - return Mono.just(response); - }) - .onErrorResume(throwable -> { + int newPrimaryTry = (!tryingPrimary || !considerSecondary) ? primaryTry + 1 : primaryTry; + return attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry, attempt + 1); + } + + return Mono.just(response); + }).onErrorResume(throwable -> { /* It is likely that many users will not realize that their Flux must be replayable and get an error upon retries when the provided data length does not match the length of the exact data. We cannot enforce the desired Flux behavior, so we provide a hint when this is likely the root cause. */ - if (throwable instanceof IllegalStateException && attempt > 1) { - return Mono.error(new IllegalStateException("The request failed because the " - + "size of the contents of the provided Flux did not match the provided " - + "data size upon attempting to retry. This is likely caused by the Flux " - + "not being replayable. To support retries, all Fluxes must produce the " - + "same data for each subscriber. Please ensure this behavior.", throwable)); - } + if (throwable instanceof IllegalStateException && attempt > 1) { + return Mono.error(new IllegalStateException("The request failed because the " + + "size of the contents of the provided Flux did not match the provided " + + "data size upon attempting to retry. This is likely caused by the Flux " + + "not being replayable. To support retries, all Fluxes must produce the " + + "same data for each subscriber. Please ensure this behavior.", throwable)); + } /* IOException is a catch-all for IO related errors. Technically it includes many types which may @@ -170,28 +160,27 @@ we do not consider the secondary at all (considerSecondary==false)). This will either case, it is better to optimistically retry instead of failing too soon. 
A Timeout Exception is a client-side timeout coming from Rx. */ - String action; - if (throwable instanceof IOException) { - action = "Retry: Network error"; - } else if (throwable instanceof TimeoutException) { - action = "Retry: Client timeout"; - } else { - action = "NoRetry: Unknown error"; - } - - if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) { + String action; + if (throwable instanceof IOException) { + action = "Retry: Network error"; + } else if (throwable instanceof TimeoutException) { + action = "Retry: Client timeout"; + } else { + action = "NoRetry: Unknown error"; + } + + if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) { /* We increment primaryTry if we are about to try the primary again (which is when we consider the secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to calculate the delay. */ - int newPrimaryTry = !tryingPrimary || !considerSecondary - ? primaryTry + 1 : primaryTry; - return attemptAsync(httpRequest, next, newPrimaryTry, considerSecondary, - attempt + 1); - } - return Mono.error(throwable); - }); + int newPrimaryTry = (!tryingPrimary || !considerSecondary) ? 
primaryTry + 1 : primaryTry; + return attemptAsync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1); + } + + return Mono.error(throwable); + }); } } diff --git a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobAsyncClientJavaDocCodeSnippets.java b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobAsyncClientJavaDocCodeSnippets.java index 3b82eabc7bcfc..7f0e2ca99eca7 100644 --- a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobAsyncClientJavaDocCodeSnippets.java +++ b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobAsyncClientJavaDocCodeSnippets.java @@ -121,7 +121,7 @@ public void download() { // END: com.azure.storage.blob.BlobAsyncClient.download // BEGIN: com.azure.storage.blob.BlobAsyncClient.download#BlobRange-ReliableDownloadOptions-BlobAccessConditions-boolean - BlobRange range = new BlobRange(1024, 2048); + BlobRange range = new BlobRange(1024, 2048L); ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5); client.download(range, options, null, false).subscribe(response -> { @@ -147,7 +147,7 @@ public void downloadToFile() { // END: com.azure.storage.blob.BlobAsyncClient.downloadToFile#String // BEGIN: com.azure.storage.blob.BlobAsyncClient.downloadToFile#String-BlobRange-Integer-ReliableDownloadOptions-BlobAccessConditions-boolean - BlobRange range = new BlobRange(1024, 2048); + BlobRange range = new BlobRange(1024, 2048L); ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5); client.downloadToFile(file, range, null, options, null, false) diff --git a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobClientJavaDocCodeSnippets.java b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobClientJavaDocCodeSnippets.java index d99b83f910ad2..e19f92dc1a6b9 100644 --- 
a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobClientJavaDocCodeSnippets.java +++ b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobClientJavaDocCodeSnippets.java @@ -115,7 +115,7 @@ public void download() { // END: com.azure.storage.blob.BlobClient.download#OutputStream // BEGIN: com.azure.storage.blob.BlobClient.download#OutputStream-BlobRange-ReliableDownloadOptions-BlobAccessConditions-boolean-Duration - BlobRange range = new BlobRange(1024, 2048); + BlobRange range = new BlobRange(1024, 2048L); ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5); System.out.printf("Download completed with status %d%n", @@ -134,7 +134,7 @@ public void downloadToFile() { // END: com.azure.storage.blob.BlobClient.downloadToFile#String // BEGIN: com.azure.storage.blob.BlobClient.downloadToFile#String-BlobRange-Integer-ReliableDownloadOptions-BlobAccessConditions-boolean-Duration - BlobRange range = new BlobRange(1024, 2048); + BlobRange range = new BlobRange(1024, 2048L); ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5); client.downloadToFile(file, range, null, options, null, false, timeout); diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy index abac9b167e18f..1768e02fab744 100644 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy @@ -17,6 +17,7 @@ import com.azure.core.util.configuration.ConfigurationManager import com.azure.identity.credential.EnvironmentCredential import com.azure.storage.blob.models.* import com.azure.storage.common.credentials.SharedKeyCredential +import io.netty.buffer.ByteBuf import org.junit.Assume import org.spockframework.lang.ISpecificationContext import 
reactor.core.publisher.Flux @@ -409,7 +410,7 @@ class APISpec extends Specification { response.value().contentEncoding() == contentEncoding && response.value().contentLanguage() == contentLanguage && response.value().contentMD5() == contentMD5 && - response.headers().value("Content-Type") == (contentType == null ? "application/octet-stream" : contentType) + response.headers().value("Content-Type") == contentType } static Metadata getMetadataFromHeaders(HttpHeaders headers) { @@ -502,43 +503,51 @@ class APISpec extends Specification { to play too nicely with mocked objects and the complex reflection stuff on both ends made it more difficult to work with than was worth it. Because this type is just for BlobDownload, we don't need to accept a header type. */ - def getStubResponseForBlobDownload(int code, Flux body, String etag) { - return new HttpResponse() { + static class MockDownloadHttpResponse extends HttpResponse { + private final int statusCode + private final HttpHeaders headers + private final Flux body + + MockDownloadHttpResponse(HttpResponse response, int statusCode, Flux body) { + this.request(response.request()) + this.statusCode = statusCode + this.headers = response.headers() + this.body = body + } - @Override - int statusCode() { - return code - } + @Override + int statusCode() { + return statusCode + } - @Override - String headerValue(String s) { - return null - } + @Override + String headerValue(String s) { + return headers.value(s) + } - @Override - HttpHeaders headers() { - return new HttpHeaders() - } + @Override + HttpHeaders headers() { + return headers + } - @Override - Flux body() { - return body - } + @Override + Flux body() { + return body + } - @Override - Mono bodyAsByteArray() { - return null - } + @Override + Mono bodyAsByteArray() { + return Mono.error(new IOException()) + } - @Override - Mono bodyAsString() { - return null - } + @Override + Mono bodyAsString() { + return Mono.error(new IOException()) + } - @Override - Mono 
bodyAsString(Charset charset) { - return null - } + @Override + Mono bodyAsString(Charset charset) { + return Mono.error(new IOException()) } } @@ -559,6 +568,7 @@ class APISpec extends Specification { return new BlobServiceClientBuilder() .endpoint(String.format("https://%s.blob.core.windows.net/", primaryCreds.accountName())) .credential(new EnvironmentCredential()) // AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET + .httpLogDetailLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .buildClient() } diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AadLoginTest.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AadLoginTest.java deleted file mode 100644 index b284e65beb8ce..0000000000000 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AadLoginTest.java +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.storage.blob; - -import com.azure.identity.credential.EnvironmentCredential; -import com.azure.storage.blob.models.ContainerItem; -import org.junit.BeforeClass; - -import java.util.Random; - -public class AadLoginTest { - private static final Random RANDOM = new Random(); - private static BlobServiceClient storageClient; - - @BeforeClass - public static void setup() { - storageClient = new BlobServiceClientBuilder() - .endpoint("https://" + System.getenv("ACCOUNT_NAME") + ".blob.core.windows.net") - .credential(new EnvironmentCredential()) -// .httpClient(HttpClient.createDefault().proxy(() -> new ProxyOptions(Type.HTTP, new InetSocketAddress("localhost", 8888)))) - .buildClient(); - } - - //@Test - public void listContainers() { - for (ContainerItem item : storageClient.listContainers()) { - System.out.println(item.name()); - } - } -} diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AppendBlobAPITest.groovy 
b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AppendBlobAPITest.groovy index b295c16c976d9..5d6f364f7e5e7 100644 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AppendBlobAPITest.groovy +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AppendBlobAPITest.groovy @@ -57,6 +57,9 @@ class AppendBlobAPITest extends APISpec { bu.create(headers, null, null, null) Response response = bu.getProperties() + // If the value isn't set the service will automatically set it + contentType = (contentType == null) ? "application/octet-stream" : contentType + then: validateBlobProperties(response, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentMD5, contentType) diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobAPITest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobAPITest.groovy index 5c1bd11787368..ec294bee15419 100644 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobAPITest.groovy +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobAPITest.groovy @@ -4,10 +4,15 @@ package com.azure.storage.blob import com.azure.core.http.HttpHeaders +import com.azure.core.http.HttpPipelineCallContext +import com.azure.core.http.HttpPipelineNextPolicy +import com.azure.core.http.policy.HttpPipelinePolicy import com.azure.core.http.rest.Response import com.azure.core.http.rest.VoidResponse import com.azure.core.implementation.util.ImplUtils import com.azure.storage.blob.models.* +import reactor.core.publisher.Flux +import reactor.core.publisher.Mono import spock.lang.Unroll import java.nio.ByteBuffer @@ -76,50 +81,50 @@ class BlobAPITest extends APISpec { This is to test the appropriate integration of DownloadResponse, including setting the correct range values on HTTPGetterInfo. 
*/ -// def "Download with retry range"() { -// /* -// We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing -// a retry per the ReliableDownloadOptions. The next request should have the same range header, which was generated -// from the count and offset values in HTTPGetterInfo that was constructed on the initial call to download. We -// don't need to check the data here, but we want to ensure that the correct range is set each time. This will -// test the correction of a bug that was found which caused HTTPGetterInfo to have an incorrect offset when it was -// constructed in BlobClient.download(). -// */ -// setup: -// HttpPipelinePolicy mockPolicy = Mock(HttpPipelinePolicy) { -// process(_ as HttpPipelineCallContext, _ as HttpPipelineNextPolicy) >> { -// HttpPipelineCallContext context, HttpPipelineNextPolicy next -> -// HttpRequest request = context.httpRequest() -// if (request.headers().value("x-ms-range") != "bytes=2-6") { -// return Mono.error(new IllegalArgumentException("The range header was not set correctly on retry.")) -// } -// else { -// // ETag can be a dummy value. It's not validated, but DownloadResponse requires one -// // TODO stub responses failing azure.core.implementation checks; too many nulls -// return Mono.just(getStubResponseForBlobDownload(206, Flux.error(new IOException()), "etag")) -// } -// } -// } -// -// BlobClient bu2 = new BlobClientBuilder() -// .endpoint(bu.getBlobUrl().toString()) -// .credential(primaryCreds) -// .addPolicy(mockPolicy) -// .buildClient() -// -// when: -// def range = new BlobRange(2, 5L) -// def options = new ReliableDownloadOptions().maxRetryRequests(3) -// bu2.download(new ByteArrayOutputStream(), options, range, null, false, null) -// -// then: -// /* -// Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is -// NOT thrown because the types would not match. 
-// */ -// def e = thrown(RuntimeException) -// e.getCause() instanceof IOException -// } + def "Download with retry range"() { + /* + We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing + a retry per the ReliableDownloadOptions. The next request should have the same range header, which was generated + from the count and offset values in HTTPGetterInfo that was constructed on the initial call to download. We + don't need to check the data here, but we want to ensure that the correct range is set each time. This will + test the correction of a bug that was found which caused HTTPGetterInfo to have an incorrect offset when it was + constructed in BlobClient.download(). + */ + setup: + HttpPipelinePolicy mockPolicy = Mock(HttpPipelinePolicy) { + process(_ as HttpPipelineCallContext, _ as HttpPipelineNextPolicy) >> { + HttpPipelineCallContext context, HttpPipelineNextPolicy next -> + return next.process() + .flatMap { + if (it.request().headers().value("x-ms-range") != "bytes=2-6") { + return Mono.error(new IllegalArgumentException("The range header was not set correctly on retry.")) + } else { + // ETag can be a dummy value. It's not validated, but DownloadResponse requires one + return Mono.just(new MockDownloadHttpResponse(it, 206, Flux.error(new IOException()))) + } + } + } + } + + BlobClient bu2 = new BlobClientBuilder() + .endpoint(bu.getBlobUrl().toString()) + .credential(primaryCreds) + .addPolicy(mockPolicy) + .buildBlobClient() + + when: + BlobRange range = new BlobRange(2, 5L) + ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(3) + bu2.download(new ByteArrayOutputStream(), range, options, null, false, null) + + then: + /* + Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is + NOT thrown because the types would not match. 
+ */ + def e = thrown(RuntimeException) + e.getCause() instanceof IOException + } def "Download min"() { when: @@ -358,8 +363,7 @@ class BlobAPITest extends APISpec { validateBasicHeaders(response.headers()) } - // TODO (alzimmer): Figure out why getProperties returns null after setHTTPHeaders - /*def "Set HTTP headers min"() { + def "Set HTTP headers min"() { when: BlobProperties properties = bu.getProperties().value() BlobHTTPHeaders headers = new BlobHTTPHeaders() @@ -368,15 +372,15 @@ class BlobAPITest extends APISpec { .blobContentType("type") .blobCacheControl(properties.cacheControl()) .blobContentLanguage(properties.contentLanguage()) - .blobContentMD5(Base64.getDecoder().decode(properties.contentMD5())) + .blobContentMD5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array()))) bu.setHTTPHeaders(headers) then: - bu.getProperties().headers().value("x-ms-blob-content-type") == "type" - }*/ + bu.getProperties().headers().value("Content-Type") == "type" + } - /*@Unroll + @Unroll def "Set HTTP headers headers"() { setup: BlobHTTPHeaders putHeaders = new BlobHTTPHeaders().blobCacheControl(cacheControl) @@ -397,7 +401,7 @@ class BlobAPITest extends APISpec { cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType null | null | null | null | null | null "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type" - }*/ + } @Unroll diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy index 63ec8837697b1..d5aaa8a65dcd5 100644 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy @@ -349,13 +349,16 @@ class BlockBlobAPITest 
extends APISpec { bu.commitBlockList(ids, headers, null, null, null) Response response = bu.getProperties() + // If the value isn't set the service will automatically set it + contentType = (contentType == null) ? "application/octet-stream" : contentType + then: validateBlobProperties(response, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentMD5, contentType) where: cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType null | null | null | null | null | null - // "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" TODO (alzimmer): Figure out why getProperties returns null for this one + "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" } @Unroll @@ -614,8 +617,7 @@ class BlockBlobAPITest extends APISpec { bu.upload(null, 0).statusCode() == 201 } - // TODO (alzimmer): Determine why getProperties returns null - /*@Unroll + @Unroll def "Upload headers"() { setup: BlobHTTPHeaders headers = new BlobHTTPHeaders().blobCacheControl(cacheControl) @@ -629,14 +631,18 @@ class BlockBlobAPITest extends APISpec { bu.upload(defaultInputStream.get(), defaultDataSize, headers, null, null, null) Response response = bu.getProperties() + // If the value isn't set the service will automatically set it + contentMD5 = (contentMD5 == null) ? MessageDigest.getInstance("MD5").digest(defaultData.array()) : contentMD5 + contentType = (contentType == null) ? 
"application/octet-stream" : contentType + then: validateBlobProperties(response, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentMD5, contentType) where: - cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType - null | null | null | null | null | null - "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" - }*/ + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" + } @Unroll def "Upload metadata"() { diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseMockFlux.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseMockFlux.java new file mode 100644 index 0000000000000..864e23a7a0825 --- /dev/null +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseMockFlux.java @@ -0,0 +1,214 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
package com.azure.storage.blob;

import com.azure.core.http.HttpHeaders;
import com.azure.core.http.HttpResponse;
import com.azure.storage.blob.models.BlobDownloadHeaders;
import com.azure.storage.blob.models.BlobsDownloadResponse;
import com.azure.storage.blob.models.StorageErrorException;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import reactor.core.CoreSubscriber;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Operators;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;

/**
 * Mock download body used to exercise the retry logic in {@link DownloadAsyncResponse}.
 *
 * <p>Each {@code DR_TEST_SCENARIO_*} constant configures a different emission pattern (clean
 * success in one or several chunks, mid-stream failures that should be retried, retryable and
 * non-retryable terminal errors, and failures thrown from the getter itself). Tests pass
 * {@link #getter(HTTPGetterInfo)} as the retry function and then inspect {@link #getTryNumber()}
 * and the {@link HTTPGetterInfo} values received on each retry.
 */
class DownloadResponseMockFlux extends Flux<ByteBuf> {
    static final int DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK = 0;
    static final int DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK = 1;
    static final int DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES = 2;
    static final int DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED = 3;
    static final int DR_TEST_SCENARIO_NON_RETRYABLE_ERROR = 4;
    static final int DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE = 6;
    static final int DR_TEST_SCENARIO_INFO_TEST = 8;

    // Which emission pattern this instance simulates; fixed at construction.
    private final int scenario;

    // 1-indexed count of calls to getter(); the initial request is try number 1.
    private int tryNumber;

    // The HTTPGetterInfo most recently passed to getter(); validated by the stream-failure
    // scenario to ensure retry bookkeeping (offset/count) is updated correctly.
    private HTTPGetterInfo info;

    // The full payload the scenario is expected to deliver; null for error-only scenarios.
    private ByteBuffer scenarioData;

    /**
     * Creates a mock flux for the given scenario.
     *
     * @param scenario one of the {@code DR_TEST_SCENARIO_*} constants
     * @throws IllegalArgumentException if {@code scenario} is not a known constant
     */
    DownloadResponseMockFlux(int scenario) {
        this.scenario = scenario;
        switch (this.scenario) {
            case DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK:
                this.scenarioData = APISpec.getRandomData(512 * 1024);
                break;
            case DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK:
            case DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES:
                this.scenarioData = APISpec.getRandomData(1024);
                break;
            case DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED:
            case DR_TEST_SCENARIO_NON_RETRYABLE_ERROR:
            case DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE:
            case DR_TEST_SCENARIO_INFO_TEST:
                // These scenarios only emit errors (or nothing); no payload is needed.
                break;
            default:
                throw new IllegalArgumentException("Invalid download resource test scenario.");
        }
    }

    /**
     * @return the payload this scenario delivers, or {@code null} for error-only scenarios
     */
    ByteBuffer getScenarioData() {
        return this.scenarioData;
    }

    /**
     * @return how many times {@link #getter(HTTPGetterInfo)} has been invoked (1-indexed)
     */
    int getTryNumber() {
        return this.tryNumber;
    }

    @Override
    public void subscribe(CoreSubscriber<? super ByteBuf> subscriber) {
        switch (this.scenario) {
            case DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK:
                subscriber.onNext(Unpooled.wrappedBuffer(this.scenarioData.duplicate()));
                Operators.complete(subscriber);
                break;

            case DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK:
                // Deliver the 1024-byte payload as four clean 256-byte slices.
                for (int i = 0; i < 4; i++) {
                    ByteBuffer toSend = this.scenarioData.duplicate();
                    toSend.position(i * 256);
                    toSend.limit((i + 1) * 256);
                    subscriber.onNext(Unpooled.wrappedBuffer(toSend));
                }
                Operators.complete(subscriber);
                break;

            case DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES:
                /*
                 Validate that the retry passed back the correct offset/count for this try.
                 tryNumber is 1-indexed, so subtract 1 when computing the expected offset.
                 */
                if (this.info.offset() != (this.tryNumber - 1) * 256
                    || this.info.count() != this.scenarioData.remaining() - (this.tryNumber - 1) * 256) {
                    Operators.error(subscriber, new IllegalArgumentException("Info values are incorrect."));
                    return;
                }
                ByteBuffer toSend = this.scenarioData.duplicate();
                toSend.position((this.tryNumber - 1) * 256);
                toSend.limit(this.tryNumber * 256);
                subscriber.onNext(Unpooled.wrappedBuffer(toSend));
                if (this.tryNumber <= 3) {
                    // Fail mid-stream on the first three tries; each should trigger a retry.
                    Operators.error(subscriber, new IOException());
                } else {
                    // Fourth try delivers the final chunk successfully.
                    Operators.complete(subscriber);
                }
                break;

            case DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED:
                // IOException is retryable; emitting it on every try exhausts the retry budget.
                Operators.error(subscriber, new IOException());
                break;

            case DR_TEST_SCENARIO_NON_RETRYABLE_ERROR:
                // A plain Exception is not retryable and should surface immediately.
                Operators.error(subscriber, new Exception());
                break;

            case DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE:
                if (this.tryNumber == 1) {
                    /*
                     We return a retryable error here so we have to invoke the getter, which will
                     throw an error in this case.
                     */
                    Operators.error(subscriber, new IOException());
                } else {
                    Operators.error(subscriber, new IllegalArgumentException("Retried after getter error."));
                }
                break;

            case DR_TEST_SCENARIO_INFO_TEST:
                switch (this.tryNumber) {
                    case 1: // Test the value of info when getting the initial response.
                    case 2: // Test the value of info when getting an intermediate response.
                        Operators.error(subscriber, new IOException());
                        break;
                    case 3:
                        // All calls to getter checked. Exit. This test does not check for data.
                        Operators.complete(subscriber);
                        break;
                    default:
                        throw new IllegalArgumentException("Invalid try number.");
                }
                break;

            default:
                Operators.error(subscriber, new IllegalArgumentException("Invalid test case"));
        }
    }

    /**
     * Retry function handed to {@link DownloadAsyncResponse}; records the try number and the
     * {@link HTTPGetterInfo} supplied for the retry, then returns a response backed by this flux.
     *
     * @param info the getter info the retry machinery computed for this attempt
     * @return a Mono emitting the next mock response
     * @throws StorageErrorException in the getter-error scenario, to prove the getter itself is
     *     not retried even for a service error that would normally be retryable
     * @throws IllegalArgumentException if the info values are wrong or an unexpected retry occurs
     */
    Mono<DownloadAsyncResponse> getter(HTTPGetterInfo info) {
        this.tryNumber++;
        this.info = info;
        BlobsDownloadResponse rawResponse =
            new BlobsDownloadResponse(null, 200, new HttpHeaders(), this, new BlobDownloadHeaders());
        DownloadAsyncResponse response = new DownloadAsyncResponse(rawResponse, info, this::getter);

        switch (this.scenario) {
            case DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE:
                switch (this.tryNumber) {
                    case 1:
                        return Mono.just(response);
                    case 2:
                        /*
                         This validates that we don't retry in the getter even if it's a retryable
                         error from the service.
                         */
                        throw new StorageErrorException("Message", new HttpResponse() {
                            @Override
                            public int statusCode() {
                                return 500;
                            }

                            @Override
                            public String headerValue(String s) {
                                return null;
                            }

                            @Override
                            public HttpHeaders headers() {
                                return null;
                            }

                            @Override
                            public Flux<ByteBuf> body() {
                                return null;
                            }

                            @Override
                            public Mono<byte[]> bodyAsByteArray() {
                                return null;
                            }

                            @Override
                            public Mono<String> bodyAsString() {
                                return null;
                            }

                            @Override
                            public Mono<String> bodyAsString(Charset charset) {
                                return null;
                            }
                        });
                    default:
                        throw new IllegalArgumentException("Retried after error in getter");
                }
            case DR_TEST_SCENARIO_INFO_TEST:
                // We also test that the info is updated in DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES.
                if (info.count() != 10 || info.offset() != 20 || !info.eTag().equals("etag")) {
                    throw new IllegalArgumentException("Info values incorrect");
                }
                return Mono.just(response);
            default:
                return Mono.just(response);
        }
    }
}
package com.azure.storage.blob


import com.azure.core.implementation.util.FluxUtil
import com.azure.storage.blob.models.ReliableDownloadOptions
import com.azure.storage.blob.models.StorageErrorException
import spock.lang.Unroll

/**
 * Exercises the reliable-download retry machinery in DownloadAsyncResponse, driven by the
 * scenarios defined in DownloadResponseMockFlux, plus one real network sanity check.
 */
class DownloadResponseTest extends APISpec {
    BlockBlobClient bu

    def setup() {
        bu = cu.getBlockBlobClient(generateBlobName())
        bu.upload(defaultInputStream.get(), defaultText.length())
    }

    /*
    This shouldn't really be different from anything else we're doing in the other tests. Just a sanity check against
    a real use case.
    */

    def "Network call"() {
        expect:
        OutputStream outputStream = new ByteArrayOutputStream()
        bu.download(outputStream)
        outputStream.toByteArray() == defaultData.array()
    }

    @Unroll
    def "Successful"() {
        setup:
        // Scenarios that eventually deliver the whole payload, possibly after mid-stream retries.
        DownloadResponseMockFlux mockFlux = new DownloadResponseMockFlux(scenario)

        HTTPGetterInfo getterInfo = new HTTPGetterInfo()
            .offset(0)
            .count(mockFlux.getScenarioData().remaining())
            .eTag("etag")

        ReliableDownloadOptions retryOptions = new ReliableDownloadOptions().maxRetryRequests(5)

        when:
        DownloadAsyncResponse response = mockFlux.getter(getterInfo).block()

        then:
        // The collected body must match the scenario payload and use the expected number of tries.
        FluxUtil.collectByteBufStream(response.body(retryOptions), false).block().nioBuffer() == mockFlux.getScenarioData()
        mockFlux.getTryNumber() == tryNumber


        where:
        scenario                                                             | tryNumber
        DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK       | 1
        DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK     | 1
        DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES | 4
    }

    @Unroll
    def "Failure"() {
        setup:
        DownloadResponseMockFlux mockFlux = new DownloadResponseMockFlux(scenario)
        ReliableDownloadOptions retryOptions = new ReliableDownloadOptions().maxRetryRequests(5)
        HTTPGetterInfo getterInfo = new HTTPGetterInfo().eTag("etag")

        when:
        DownloadAsyncResponse response = mockFlux.getter(getterInfo).block()
        response.body(retryOptions).blockFirst()

        then:
        def e = thrown(Throwable) // Blocking subscribe will sometimes wrap the IOException in a RuntimeException.
        if (e.getCause() != null) {
            e = e.getCause()
        }
        exceptionType.isInstance(e)
        mockFlux.getTryNumber() == tryNumber

        /*
        tryNumber is 7 because the initial request is the first try, then it will fail when retryCount>maxRetryCount,
        which is when retryCount=6 and therefore tryNumber=7
        */
        where:
        scenario                                                         | exceptionType         | tryNumber
        DownloadResponseMockFlux.DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED   | IOException           | 7
        DownloadResponseMockFlux.DR_TEST_SCENARIO_NON_RETRYABLE_ERROR    | Exception             | 1
        DownloadResponseMockFlux.DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE    | StorageErrorException | 2
    }

    @Unroll
    def "Info null IA"() {
        setup:
        DownloadResponseMockFlux mockFlux =
            new DownloadResponseMockFlux(DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK)

        when:
        // A null info, or one with a null eTag, must be rejected at construction time.
        new DownloadAsyncResponse(mockFlux.getter(info).block().rawResponse(), info,
            { HTTPGetterInfo newInfo -> mockFlux.getter(newInfo) })

        then:
        thrown(IllegalArgumentException)

        where:
        info                            | _
        null                            | _
        new HTTPGetterInfo().eTag(null) | _
    }

    def "Options IA"() {
        when:
        new ReliableDownloadOptions().maxRetryRequests(-1)

        then:
        thrown(IllegalArgumentException)
    }

    def "Getter IA"() {
        setup:
        DownloadResponseMockFlux mockFlux =
            new DownloadResponseMockFlux(DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK)

        when:
        // A null getter function must be rejected when the body is consumed.
        DownloadAsyncResponse response = new DownloadAsyncResponse(mockFlux.getter(new HTTPGetterInfo()).block()
            .rawResponse(), new HTTPGetterInfo().eTag("etag"), null)
        response.body(null).blockFirst()

        then:
        thrown(IllegalArgumentException)
    }

    def "Info"() {
        setup:
        // The mock validates the info values inside getter(); reaching try 3 means all checks passed.
        DownloadResponseMockFlux mockFlux = new DownloadResponseMockFlux(DownloadResponseMockFlux.DR_TEST_SCENARIO_INFO_TEST)
        HTTPGetterInfo getterInfo = new HTTPGetterInfo()
            .offset(20)
            .count(10)
            .eTag("etag")

        ReliableDownloadOptions retryOptions = new ReliableDownloadOptions().maxRetryRequests(5)

        when:
        DownloadAsyncResponse response = mockFlux.getter(getterInfo).block()
        response.body(retryOptions).blockFirst()

        then:
        mockFlux.getTryNumber() == 3
    }

    def "Info count IA"() {
        when:
        new HTTPGetterInfo().count(-1)

        then:
        thrown(IllegalArgumentException)
    }
}
each field gets placed into the proper location within the string to sign and that null + values are handled correctly. We will validate the whole SAS with service calls as well as correct serialization of + individual parts later. + */ + + @Unroll + def "serviceSasSignatures string to sign"() { + when: + ServiceSASSignatureValues v = new ServiceSASSignatureValues() + if (permissions != null) { + v.permissions(new BlobSASPermission().read(true).toString()) + } else { + v.permissions("") + } + + if (snapId != null) { + v.resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT) + } else { + v.resource(Constants.UrlConstants.SAS_BLOB_CONSTANT) + } + + v.startTime(startTime) + .expiryTime(expiryTime) + .canonicalName(String.format("/blob/%s/containerName/blobName", primaryCreds.accountName())) + .snapshotId(snapId) + + if (ipRange != null) { + v.ipRange(new IPRange().ipMin("ip")) + } + + v.identifier(identifier) + .protocol(protocol) + .cacheControl(cacheControl) + .contentDisposition(disposition) + .contentEncoding(encoding) + .contentLanguage(language) + .contentType(type) + + SASQueryParameters token = v.generateSASQueryParameters(primaryCreds) + + if (startTime != null) { + expectedStringToSign = String.format(expectedStringToSign, + Utility.ISO_8601_UTC_DATE_FORMATTER.format(startTime), + Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiryTime), + primaryCreds.accountName()) + } else { + expectedStringToSign = String.format(expectedStringToSign, + Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiryTime), + primaryCreds.accountName()) + } + + then: + token.signature() == primaryCreds.computeHmac256(expectedStringToSign) + + /* + We don't test the blob or containerName properties because canonicalized resource is always added as at least + /blob/accountName. We test canonicalization of resources later. Again, this is not to test a fully functional + sas but the construction of the string to sign. 
+ Signed resource is tested elsewhere, as we work some minor magic in choosing which value to use. + */ + where: + permissions | startTime | expiryTime | identifier | ipRange | protocol | snapId | cacheControl | disposition | encoding | language | type || expectedStringToSign + new BlobSASPermission() | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | null | null || "r\n\n%s\n" + "/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | OffsetDateTime.now(ZoneOffset.UTC).minusDays(1) | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | null | null || "\n%s\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | "id" | null | null | null | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\nid\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | new IPRange() | null | null | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\nip\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | SASProtocol.HTTPS_ONLY | null | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n" + SASProtocol.HTTPS_ONLY + "\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | 
"snapId" | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nbs\nsnapId\n\n\n\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | "control" | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\ncontrol\n\n\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | "disposition" | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\ndisposition\n\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | "encoding" | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\nencoding\n\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | "language" | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\nlanguage\n" + null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | null | "type" || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\ntype" + } + + @Unroll + def "serviceSasSignatures string to sign user delegation key"() { + when: + ServiceSASSignatureValues v = new ServiceSASSignatureValues() + if (permissions != null) { + v.permissions(new BlobSASPermission().read(true).toString()) + } + + v.startTime(startTime) + .canonicalName(String.format("/blob/%s/containerName/blobName", primaryCreds.accountName())) + .snapshotId(snapId) + + if (expiryTime == null) { + v.expiryTime(OffsetDateTime.now()) + } else { + v.expiryTime(expiryTime) + } + + 
if (snapId != null) { + v.resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT) + } else { + v.resource(Constants.UrlConstants.SAS_BLOB_CONSTANT) + } + + if (ipRange != null) { + v.ipRange(new IPRange().ipMin("ip")) + } + + v.protocol(protocol) + .cacheControl(cacheControl) + .contentDisposition(disposition) + .contentEncoding(encoding) + .contentLanguage(language) + .contentType(type) + + UserDelegationKey key = new UserDelegationKey() + .signedOid(keyOid) + .signedTid(keyTid) + .signedStart(keyStart) + .signedExpiry(keyExpiry) + .signedService(keyService) + .signedVersion(keyVersion) + .value(keyValue) + + SASQueryParameters token = v.generateSASQueryParameters(key) + + expectedStringToSign = String.format(expectedStringToSign, primaryCreds.accountName()) + + then: + token.signature() == Utility.delegateComputeHmac256(key, expectedStringToSign) + + /* + We test string to sign functionality directly related to user delegation sas specific parameters + */ + where: + permissions | startTime | expiryTime | keyOid | keyTid | keyStart | keyExpiry | keyService | keyVersion | keyValue | ipRange | protocol | snapId | cacheControl | disposition | encoding | language | type || expectedStringToSign + new BlobSASPermission() | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "r\n\n\n" + "/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + 
"\nb\n\n\n\n\n\n" + null | null | OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | null | "11111111-1111-1111-1111-111111111111" | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n11111111-1111-1111-1111-111111111111\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | null | null | "22222222-2222-2222-2222-222222222222" | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n22222222-2222-2222-2222-222222222222\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | null | null | null | OffsetDateTime.of(LocalDateTime.of(2018, 1, 1, 0, 0), ZoneOffset.UTC) | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n2018-01-01T00:00:00Z\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | null | null | null | null | OffsetDateTime.of(LocalDateTime.of(2018, 1, 1, 0, 0), ZoneOffset.UTC) | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n2018-01-01T00:00:00Z\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | 
null | null | null | null | null | null | "b" | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\nb\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | null | null | null | null | null | null | "2018-06-17" | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n2018-06-17\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | new IPRange() | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\nip\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | SASProtocol.HTTPS_ONLY | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n" + SASProtocol.HTTPS_ONLY + "\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | "snapId" | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nbs\nsnapId\n\n\n\n\n" + null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | "control" | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\ncontrol\n\n\n\n" + null | null | null | null | null | null | null | null | null | 
"3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | "disposition" | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\ndisposition\n\n\n" + null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | "encoding" | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\nencoding\n\n" + null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | "language" | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\nlanguage\n" + null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | "type" || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\ntype" + } + + @Unroll + def "serviceSASSignatureValues canonicalizedResource"() { + setup: + ServiceSASSignatureValues v = new ServiceSASSignatureValues() + .expiryTime(expiryTime) + .permissions(new BlobSASPermission().toString()) + .resource(expectedResource) + .canonicalName(String.format("/blob/%s/%s", primaryCreds.accountName(), containerName)) + .snapshotId(snapId) + + if (blobName != null) { + v.canonicalName(v.canonicalName() + "/" + blobName) + } + + expectedStringToSign = String.format(expectedStringToSign, + Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiryTime), + primaryCreds.accountName()) + + when: + SASQueryParameters token = v.generateSASQueryParameters(primaryCreds) + + then: + token.signature() == primaryCreds.computeHmac256(expectedStringToSign) + token.resource() == expectedResource + 
+ where: + containerName | blobName | snapId | expiryTime || expectedResource | expectedStringToSign + "c" | "b" | "id" | OffsetDateTime.now() || "bs" | "\n\n%s\n" + "/blob/%s/c/b\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nbs\nid\n\n\n\n\n" + "c" | "b" | null | OffsetDateTime.now() || "b" | "\n\n%s\n" + "/blob/%s/c/b\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n" + "c" | null | null | OffsetDateTime.now() || "c" | "\n\n%s\n" + "/blob/%s/c\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nc\n\n\n\n\n\n" + + } + + @Unroll + def "serviceSasSignatureValues IA"() { + setup: + ServiceSASSignatureValues v = new ServiceSASSignatureValues() + .permissions(new AccountSASPermission().toString()) + .expiryTime(OffsetDateTime.now()) + .resource(containerName) + .canonicalName(blobName) + .snapshotId("2018-01-01T00:00:00.0000000Z") + .version(version) + + when: + v.generateSASQueryParameters((SharedKeyCredential)creds) + + then: + def e = thrown(IllegalArgumentException) + e.getMessage().contains(parameter) + + where: + containerName | version | creds | blobName || parameter + "c" | null | primaryCreds | "b" | "version" + "c" | "v" | null | "b" | "sharedKeyCredentials" + "c" | "v" | primaryCreds | null | "canonicalName" + } + + @Unroll + def "BlobSASPermissions toString"() { + setup: + BlobSASPermission perms = new BlobSASPermission() + .read(read) + .write(write) + .delete(delete) + .create(create) + .add(add) + + expect: + perms.toString() == expectedString + + where: + read | write | delete | create | add || expectedString + true | false | false | false | false || "r" + false | true | false | false | false || "w" + false | false | true | false | false || "d" + false | false | false | true | false || "c" + false | false | false | false | true || "a" + true | true | true | true | true || "racwd" + } + + @Unroll + def "BlobSASPermissions parse"() { + when: + BlobSASPermission perms = 
BlobSASPermission.parse(permString) + + then: + perms.read() == read + perms.write() == write + perms.delete() == delete + perms.create() == create + perms.add() == add + + where: + permString || read | write | delete | create | add + "r" || true | false | false | false | false + "w" || false | true | false | false | false + "d" || false | false | true | false | false + "c" || false | false | false | true | false + "a" || false | false | false | false | true + "racwd" || true | true | true | true | true + "dcwra" || true | true | true | true | true + } + + def "BlobSASPermissions parse IA"() { + when: + BlobSASPermission.parse("rwaq") + + then: + thrown(IllegalArgumentException) + } + + @Unroll + def "ContainerSASPermissions toString"() { + setup: + ContainerSASPermission perms = new ContainerSASPermission() + .read(read) + .write(write) + .delete(delete) + .create(create) + .add(add) + .list(list) + + expect: + perms.toString() == expectedString + + where: + read | write | delete | create | add | list || expectedString + true | false | false | false | false | false || "r" + false | true | false | false | false | false || "w" + false | false | true | false | false | false || "d" + false | false | false | true | false | false || "c" + false | false | false | false | true | false || "a" + false | false | false | false | false | true || "l" + true | true | true | true | true | true || "racwdl" + } + + @Unroll + def "ContainerSASPermissions parse"() { + when: + ContainerSASPermission perms = ContainerSASPermission.parse(permString) + + then: + perms.read() == read + perms.write() == write + perms.delete() == delete + perms.create() == create + perms.add() == add + perms.list() == list + + where: + permString || read | write | delete | create | add | list + "r" || true | false | false | false | false | false + "w" || false | true | false | false | false | false + "d" || false | false | true | false | false | false + "c" || false | false | false | true | false | false + 
"a" || false | false | false | false | true | false + "l" || false | false | false | false | false | true + "racwdl" || true | true | true | true | true | true + "dcwrla" || true | true | true | true | true | true + } + + def "ContainerSASPermissions parse IA"() { + when: + ContainerSASPermission.parse("rwaq") + + then: + thrown(IllegalArgumentException) + } + + @Unroll + def "IPRange toString"() { + setup: + def ip = new IPRange() + .ipMin(min) + .ipMax(max) + + expect: + ip.toString() == expectedString + + where: + min | max || expectedString + "a" | "b" || "a-b" + "a" | null || "a" + null | "b" || "" + } + + @Unroll + def "IPRange parse"() { + when: + IPRange ip = IPRange.parse(rangeStr) + + then: + ip.ipMin() == min + ip.ipMax() == max + + where: + rangeStr || min | max + "a-b" || "a" | "b" + "a" || "a" | null + "" || "" | null + } + + @Unroll + def "SASProtocol parse"() { + expect: + SASProtocol.parse(protocolStr) == protocol + + where: + protocolStr || protocol + "https" || SASProtocol.HTTPS_ONLY + "https,http" || SASProtocol.HTTPS_HTTP + } + + /* + This test will ensure that each field gets placed into the proper location within the string to sign and that null + values are handled correctly. We will validate the whole SAS with service calls as well as correct serialization of + individual parts later. 
+ */ + + @Unroll + def "accountSasSignatures string to sign"() { + when: + AccountSASSignatureValues v = new AccountSASSignatureValues() + .permissions(new AccountSASPermission().read(true).toString()) + .services("b") + .resourceTypes("o") + .startTime(startTime) + .expiryTime(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + .protocol(protocol) + + if (ipRange != null) { + v.ipRange(new IPRange().ipMin("ip")) + } + + def token = v.generateSASQueryParameters(primaryCreds) + + expectedStringToSign = String.format(expectedStringToSign, primaryCreds.accountName()) + + then: + token.signature() == primaryCreds.computeHmac256(expectedStringToSign) + + where: + startTime | ipRange | protocol || expectedStringToSign + OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) | null | null || "%s\nr\nb\no\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\n" + null | new IPRange() | null || "%s\nr\nb\no\n\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\nip\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\n" + null | null | SASProtocol.HTTPS_ONLY || "%s\nr\nb\no\n\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n\n" + SASProtocol.HTTPS_ONLY + "\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\n" + } + + @Unroll + def "accountSasSignatureValues IA"() { + setup: + AccountSASSignatureValues v = new AccountSASSignatureValues() + .permissions(permissions) + .services(service) + .resourceTypes(resourceType) + .expiryTime(expiryTime) + .version(version) + + when: + v.generateSASQueryParameters(creds) + + then: + def e = thrown(IllegalArgumentException) + e.getMessage().contains(parameter) + + 
where: + permissions | service | resourceType | expiryTime | version | creds || parameter + null | "b" | "c" | OffsetDateTime.now() | "v" | primaryCreds || "permissions" + "c" | null | "c" | OffsetDateTime.now() | "v" | primaryCreds || "services" + "c" | "b" | null | OffsetDateTime.now() | "v" | primaryCreds || "resourceTypes" + "c" | "b" | "c" | null | "v" | primaryCreds || "expiryTime" + "c" | "b" | "c" | OffsetDateTime.now() | null | primaryCreds || "version" + "c" | "b" | "c" | OffsetDateTime.now() | "v" | null || "SharedKeyCredential" + } + + @Unroll + def "AccountSASPermissions toString"() { + setup: + AccountSASPermission perms = new AccountSASPermission() + .read(read) + .write(write) + .delete(delete) + .list(list) + .add(add) + .create(create) + .update(update) + .processMessages(process) + + expect: + perms.toString() == expectedString + + where: + read | write | delete | list | add | create | update | process || expectedString + true | false | false | false | false | false | false | false || "r" + false | true | false | false | false | false | false | false || "w" + false | false | true | false | false | false | false | false || "d" + false | false | false | true | false | false | false | false || "l" + false | false | false | false | true | false | false | false || "a" + false | false | false | false | false | true | false | false || "c" + false | false | false | false | false | false | true | false || "u" + false | false | false | false | false | false | false | true || "p" + true | true | true | true | true | true | true | true || "rwdlacup" + } + + @Unroll + def "AccountSASPermissions parse"() { + when: + AccountSASPermission perms = AccountSASPermission.parse(permString) + + then: + perms.read() == read + perms.write() == write + perms.delete() == delete + perms.list() == list + perms.add() == add + perms.create() == create + perms.update() == update + perms.processMessages() == process + + where: + permString || read | write | delete | list | add 
| create | update | process + "r" || true | false | false | false | false | false | false | false + "w" || false | true | false | false | false | false | false | false + "d" || false | false | true | false | false | false | false | false + "l" || false | false | false | true | false | false | false | false + "a" || false | false | false | false | true | false | false | false + "c" || false | false | false | false | false | true | false | false + "u" || false | false | false | false | false | false | true | false + "p" || false | false | false | false | false | false | false | true + "rwdlacup" || true | true | true | true | true | true | true | true + "lwrupcad" || true | true | true | true | true | true | true | true + } + + def "AccountSASPermissions parse IA"() { + when: + AccountSASPermission.parse("rwaq") + + then: + thrown(IllegalArgumentException) + } + + @Unroll + def "AccountSASResourceType toString"() { + setup: + AccountSASResourceType resourceTypes = new AccountSASResourceType() + .service(service) + .container(container) + .object(object) + + expect: + resourceTypes.toString() == expectedString + + where: + service | container | object || expectedString + true | false | false || "s" + false | true | false || "c" + false | false | true || "o" + true | true | true || "sco" + } + + @Unroll + def "AccountSASResourceType parse"() { + when: + AccountSASResourceType resourceTypes = AccountSASResourceType.parse(resourceTypeString) + + then: + resourceTypes.service() == service + resourceTypes.container() == container + resourceTypes.object() == object + + where: + resourceTypeString || service | container | object + "s" || true | false | false + "c" || false | true | false + "o" || false | false | true + "sco" || true | true | true + } + + @Unroll + def "AccountSASResourceType IA"() { + when: + AccountSASResourceType.parse("scq") + + then: + thrown(IllegalArgumentException) + } + + def "BlobURLParts"() { + setup: + BlobURLParts parts = new BlobURLParts() + 
.scheme("http") + .host("host") + .containerName("container") + .blobName("blob") + .snapshot("snapshot") + + ServiceSASSignatureValues sasValues = new ServiceSASSignatureValues() + .expiryTime(OffsetDateTime.now(ZoneOffset.UTC).plusDays(1)) + .permissions("r") + .canonicalName(String.format("/blob/%s/container/blob", primaryCreds.accountName())) + .resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT) + + parts.sasQueryParameters(sasValues.generateSASQueryParameters(primaryCreds)) + + when: + String[] splitParts = parts.toURL().toString().split("\\?") + + then: + splitParts.size() == 2 // Ensure that there is only one question mark even when sas and snapshot are present + splitParts[0] == "http://host/container/blob" + splitParts[1].contains("snapshot=snapshot") + splitParts[1].contains("sp=r") + splitParts[1].contains("sig=") + splitParts[1].split("&").size() == 6 // snapshot & sv & sr & sp & sig & se + } + + def "URLParser"() { + when: + BlobURLParts parts = URLParser.parse(new URL("http://host/container/blob?snapshot=snapshot&sv=" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "&sr=c&sp=r&sig=Ee%2BSodSXamKSzivSdRTqYGh7AeMVEk3wEoRZ1yzkpSc%3D")) + + then: + parts.scheme() == "http" + parts.host() == "host" + parts.containerName() == "container" + parts.blobName() == "blob" + parts.snapshot() == "snapshot" + parts.sasQueryParameters().permissions() == "r" + parts.sasQueryParameters().version() == Constants.HeaderConstants.TARGET_STORAGE_VERSION + parts.sasQueryParameters().resource() == "c" + parts.sasQueryParameters().signature() == Utility.safeURLDecode("Ee%2BSodSXamKSzivSdRTqYGh7AeMVEk3wEoRZ1yzkpSc%3D") + } +} diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/LargeFileTest.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/LargeFileTest.java deleted file mode 100644 index 8bff57d559513..0000000000000 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/LargeFileTest.java +++ 
/dev/null @@ -1,54 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.storage.blob; - -import com.azure.storage.common.credentials.SharedKeyCredential; - -import java.io.File; -import java.time.Duration; -import java.time.OffsetDateTime; -import java.util.Random; - -public class LargeFileTest { - private static final Random RANDOM = new Random(); - private static final String FILE_PATH = "C:\\Users\\jianghlu\\10g.dat"; - private static BlobServiceClient storageClient; - private static ContainerClient containerClient; - - //@BeforeClass - public static void setup() { - storageClient = new BlobServiceClientBuilder() - .credential(new SharedKeyCredential(System.getenv("ACCOUNT_NAME"), System.getenv("ACCOUNT_KEY"))) - .endpoint("https://" + System.getenv("ACCOUNT_NAME") + ".blob.core.windows.net") -// .httpClient(HttpClient.createDefault().proxy(() -> new ProxyOptions(Type.HTTP, new InetSocketAddress("localhost", 8888)))) - .buildClient(); - - containerClient = storageClient.getContainerClient("testcontainer-10g"); - if (!containerClient.exists().value()) { - containerClient.create(); - } - } - - //@Test - public void uploadLargeBlockBlob() throws Exception { - BlockBlobClient blockBlobClient = containerClient.getBlockBlobClient("testblob" + RANDOM.nextInt(1000)); - blockBlobClient.uploadFromFile(FILE_PATH); - } - - //@Test - public void downloadLargeBlockBlob() throws Exception { - OffsetDateTime start = OffsetDateTime.now(); - BlockBlobClient blockBlobClient = containerClient.getBlockBlobClient("testblob-10g"); -// blockBlobClient.uploadFromFile(filePath); -// File uploadedFile = new File(filePath); -// System.out.println("Upload " + uploadedFile.length() + " bytes took " + Duration.between(start, OffsetDateTime.now()).getSeconds() + " seconds"); -// start = OffsetDateTime.now(); - String downloadName = "D:\\10g-downloaded.dat"; - File downloaded = new File(downloadName); - 
downloaded.createNewFile(); - blockBlobClient.downloadToFile(downloadName); - System.out.println("Download " + downloaded.length() + " bytes took " + Duration.between(start, OffsetDateTime.now()).getSeconds() + " seconds"); - downloaded.delete(); - } -} diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/PageBlobAPITest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/PageBlobAPITest.groovy index eee346b007288..d0d26275228fa 100644 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/PageBlobAPITest.groovy +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/PageBlobAPITest.groovy @@ -40,7 +40,7 @@ class PageBlobAPITest extends APISpec { def "Create sequence number"() { when: bu.create(PageBlobClient.PAGE_BYTES, 2, null, null, - null, null) + null, null) then: Integer.parseInt(bu.getProperties().headers().value("x-ms-blob-sequence-number")) == 2 @@ -50,24 +50,27 @@ class PageBlobAPITest extends APISpec { def "Create headers"() { setup: BlobHTTPHeaders headers = new BlobHTTPHeaders().blobCacheControl(cacheControl) - .blobContentDisposition(contentDisposition) - .blobContentEncoding(contentEncoding) - .blobContentLanguage(contentLanguage) - .blobContentMD5(contentMD5) - .blobContentType(contentType) + .blobContentDisposition(contentDisposition) + .blobContentEncoding(contentEncoding) + .blobContentLanguage(contentLanguage) + .blobContentMD5(contentMD5) + .blobContentType(contentType) when: bu.create(PageBlobClient.PAGE_BYTES, null, headers, null, null, null) Response response = bu.getProperties(null, null) + // If the value isn't set the service will automatically set it + contentType = (contentType == null) ? 
"application/octet-stream" : contentType + then: validateBlobProperties(response, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentMD5, contentType) where: cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType null | null | null | null | null | null - // "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type" TODO (alzimmer): Determine why getProperties returns null + "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type" } @Unroll @@ -109,7 +112,7 @@ class PageBlobAPITest extends APISpec { expect: bu.create(PageBlobClient.PAGE_BYTES, null, null, null, bac, null) - .statusCode() == 201 + .statusCode() == 201 where: modified | unmodified | match | noneMatch | leaseID @@ -159,7 +162,7 @@ class PageBlobAPITest extends APISpec { def "Upload page"() { when: Response response = bu.uploadPages(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES - 1), - new ByteArrayInputStream(getRandomByteArray(PageBlobClient.PAGE_BYTES))) + new ByteArrayInputStream(getRandomByteArray(PageBlobClient.PAGE_BYTES))) then: response.statusCode() == 201 @@ -306,7 +309,7 @@ class PageBlobAPITest extends APISpec { when: destURL.uploadPagesFromURL(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES * 2 - 1), - sourceURL.getBlobUrl(), PageBlobClient.PAGE_BYTES * 2) + sourceURL.getBlobUrl(), PageBlobClient.PAGE_BYTES * 2) then: ByteArrayOutputStream outputStream = new ByteArrayOutputStream() @@ -323,8 +326,8 @@ class PageBlobAPITest extends APISpec { thrown(IllegalArgumentException) where: - sourceOffset | range - (Long)PageBlobClient.PAGE_BYTES | null + sourceOffset | range + (Long) PageBlobClient.PAGE_BYTES | null } def "Upload page from URL MD5"() { @@ -354,14 +357,14 @@ class PageBlobAPITest extends APISpec { when: 
destURL.uploadPagesFromURL(pageRange, bu.getBlobUrl(), null, - MessageDigest.getInstance("MD5").digest("garbage".getBytes()), null, null, null) + MessageDigest.getInstance("MD5").digest("garbage".getBytes()), null, null, null) then: thrown(StorageException) } @Unroll - def "Upload page from URL destination AC"() { + def "Upload page from URL destination AC"() { setup: cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null) def sourceURL = cu.getPageBlobClient(generateBlobName()) @@ -504,7 +507,7 @@ class PageBlobAPITest extends APISpec { Response response = bu.clearPages(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES - 1)) then: - !bu.getPageRanges(new BlobRange(0)).iterator().hasNext() + bu.getPageRanges(new BlobRange(0)).value().pageRange().size() == 0 validateBasicHeaders(response.headers()) response.value().contentMD5() == null response.value().blobSequenceNumber() == 0 @@ -536,7 +539,7 @@ class PageBlobAPITest extends APISpec { expect: bu.clearPages(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES - 1), pac, null) - .statusCode() == 201 + .statusCode() == 201 where: modified | unmodified | match | noneMatch | leaseID | sequenceNumberLT | sequenceNumberLTE | sequenceNumberEqual @@ -600,21 +603,20 @@ class PageBlobAPITest extends APISpec { thrown(StorageException) } - // TODO (alzimmer): Turn this on once paged responses become available - /*def "Get page ranges"() { + def "Get page ranges"() { setup: bu.uploadPages(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES - 1), new ByteArrayInputStream(getRandomByteArray(PageBlobClient.PAGE_BYTES))) when: - Iterable response = bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES)) + Response response = bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES)) then: response.statusCode() == 200 - response.body().pageRange().size() == 1 - validateBasicHeaders(headers) - headers.blobContentLength() == (long) PageBlobClient.PAGE_BYTES - }*/ + response.value().pageRange().size() == 1 + 
validateBasicHeaders(response.headers()) + Integer.parseInt(response.headers().value("x-ms-blob-content-length")) == PageBlobClient.PAGE_BYTES + } def "Get page ranges min"() { when: @@ -639,7 +641,7 @@ class PageBlobAPITest extends APISpec { when: - bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES), bac, null).iterator().hasNext() + bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES), bac, null) then: notThrown(StorageException) @@ -666,7 +668,7 @@ class PageBlobAPITest extends APISpec { .ifNoneMatch(setupBlobMatchCondition(bu, noneMatch))) when: - bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES), bac, null).iterator().hasNext() + bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES), bac, null) then: thrown(StorageException) @@ -685,14 +687,13 @@ class PageBlobAPITest extends APISpec { bu = cu.getPageBlobClient(generateBlobName()) when: - bu.getPageRanges(null).iterator().hasNext() + bu.getPageRanges(null) then: thrown(StorageException) } - // TODO (alzimmer): This test needs to be restructured to support the Iterable T return - /*def "Get page ranges diff"() { + def "Get page ranges diff"() { setup: bu.create(PageBlobClient.PAGE_BYTES * 2) @@ -707,19 +708,18 @@ class PageBlobAPITest extends APISpec { bu.clearPages(new PageRange().start(PageBlobClient.PAGE_BYTES).end(PageBlobClient.PAGE_BYTES * 2 - 1)) when: - Iterable response = bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES * 2), snapshot) - PageBlobGetPageRangesDiffHeaders headers = response.headers() + Response response = bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES * 2), snapshot) then: - response.body().pageRange().size() == 1 - response.body().pageRange().get(0).start() == 0 - response.body().pageRange().get(0).end() == PageBlobClient.PAGE_BYTES - 1 - response.body().clearRange().size() == 1 - response.body().clearRange().get(0).start() == PageBlobClient.PAGE_BYTES - response.body().clearRange().get(0).end() == 
PageBlobClient.PAGE_BYTES * 2 - 1 - validateBasicHeaders(headers) - headers.blobContentLength() == PageBlobClient.PAGE_BYTES * 2 - }*/ + response.value().pageRange().size() == 1 + response.value().pageRange().get(0).start() == 0 + response.value().pageRange().get(0).end() == PageBlobClient.PAGE_BYTES - 1 + response.value().clearRange().size() == 1 + response.value().clearRange().get(0).start() == PageBlobClient.PAGE_BYTES + response.value().clearRange().get(0).end() == PageBlobClient.PAGE_BYTES * 2 - 1 + validateBasicHeaders(response.headers()) + Integer.parseInt(response.headers().value("x-ms-blob-content-length")) == PageBlobClient.PAGE_BYTES * 2 + } def "Get page ranges diff min"() { setup: @@ -745,7 +745,7 @@ class PageBlobAPITest extends APISpec { .ifNoneMatch(noneMatch)) when: - bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES), snapshot, bac, null).iterator().hasNext() + bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES), snapshot, bac, null) then: notThrown(StorageException) @@ -774,7 +774,7 @@ class PageBlobAPITest extends APISpec { .ifNoneMatch(setupBlobMatchCondition(bu, noneMatch))) when: - bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES), snapshot, bac, null).iterator().hasNext() + bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES), snapshot, bac, null) then: thrown(StorageException) @@ -793,7 +793,7 @@ class PageBlobAPITest extends APISpec { bu = cu.getPageBlobClient(generateBlobName()) when: - bu.getPageRangesDiff(null, "snapshot").iterator().hasNext() + bu.getPageRangesDiff(null, "snapshot") then: thrown(StorageException) @@ -811,13 +811,13 @@ class PageBlobAPITest extends APISpec { thrown(IllegalArgumentException) where: - start | end - 1 | 1 - -PageBlobClient.PAGE_BYTES | PageBlobClient.PAGE_BYTES - 1 - 0 | 0 - 1 | PageBlobClient.PAGE_BYTES - 1 - 0 | PageBlobClient.PAGE_BYTES - PageBlobClient.PAGE_BYTES * 2 | PageBlobClient.PAGE_BYTES - 1 + start | end + 1 | 1 + -PageBlobClient.PAGE_BYTES 
| PageBlobClient.PAGE_BYTES - 1 + 0 | 0 + 1 | PageBlobClient.PAGE_BYTES - 1 + 0 | PageBlobClient.PAGE_BYTES + PageBlobClient.PAGE_BYTES * 2 | PageBlobClient.PAGE_BYTES - 1 } def "Resize"() { @@ -931,7 +931,7 @@ class PageBlobAPITest extends APISpec { expect: bu.updateSequenceNumber(SequenceNumberActionType.UPDATE, 1, bac, null) - .statusCode() == 200 + .statusCode() == 200 where: modified | unmodified | match | noneMatch | leaseID diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/ProgressReporterTest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/ProgressReporterTest.groovy new file mode 100644 index 0000000000000..773d552ea3197 --- /dev/null +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/ProgressReporterTest.groovy @@ -0,0 +1,109 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob + +import io.netty.buffer.ByteBuf +import io.netty.buffer.Unpooled +import reactor.core.publisher.Flux + +import java.nio.ByteBuffer +import java.util.concurrent.atomic.AtomicLong +import java.util.concurrent.locks.ReentrantLock + +class ProgressReporterTest extends APISpec { + def "Report progress sequential"() { + setup: + ByteBuffer buf1 = getRandomData(10) + ByteBuffer buf2 = getRandomData(15) + ByteBuffer buf3 = getRandomData(5) + + IProgressReceiver mockReceiver = Mock(IProgressReceiver) + + Flux data = Flux.just(buf1, buf2, buf3) + data = ProgressReporter.addProgressReporting(data, mockReceiver) + + when: + data.subscribe() + data.subscribe() // Subscribing twice enforces invocation of rewind + + then: + // The same benchmarks should be reported on each subscription (retry). We should never go over total data size. 
+ 2 * mockReceiver.reportProgress(10) + 2 * mockReceiver.reportProgress(25) + 2 * mockReceiver.reportProgress(30) + 0 * mockReceiver.reportProgress({it > 30}) + } + + def "Report progress sequential network test"() { + setup: + IProgressReceiver mockReceiver = Mock(IProgressReceiver) + + ByteBuffer buffer = getRandomData(1 * 1024 * 1024) + Flux data = ProgressReporter.addProgressReporting(Flux.just(buffer), mockReceiver) + .map({ it -> Unpooled.wrappedBuffer(it) }) + + when: + BlockBlobAsyncClient bu = new BlobClientBuilder() + .endpoint(cu.getContainerUrl().toString()) + .blobName(generateBlobName()) + .credential(primaryCreds) + .buildBlockBlobAsyncClient() + bu.upload(data, buffer.remaining()).block() + + then: + /* + With the HTTP client, etc. involved, the best we can guarantee is that it's called once with the total. There + may or may not be any intermediary calls. This test mostly looks to validate that there is no interference + with actual network calls. + */ + 1 * mockReceiver.reportProgress(1 * 1024 * 1024) + } + + def "Report progress parallel"() { + setup: + ByteBuffer buf1 = getRandomData(10) + ByteBuffer buf2 = getRandomData(15) + ByteBuffer buf3 = getRandomData(5) + + ReentrantLock lock = new ReentrantLock() + AtomicLong totalProgress = new AtomicLong(0) + + IProgressReceiver mockReceiver = Mock(IProgressReceiver) + Flux data = Flux.just(buf1, buf2, buf3) + Flux data2 = Flux.just(buf3, buf2, buf1) + data = ProgressReporter.addParallelProgressReporting(data, mockReceiver, lock, totalProgress) + data2 = ProgressReporter.addParallelProgressReporting(data2, mockReceiver, lock, totalProgress) + + when: + data.subscribe() + data2.subscribe() + data.subscribe() + data2.subscribe() + + sleep(3000) // These Fluxes should complete quickly, but we don't want to block or it'll order everything + + then: + /* + There should be at least one call reporting the total length of the data. 
There may be two if both data and + data2 complete before the second batch of subscriptions + */ + (1..2) * mockReceiver.reportProgress(60) + + /* + There should be 12 calls total, but either one or two of them could be reporting the total length, so we + can only guarantee four calls with an unknown parameter. This test doesn't strictly mimic the network as + there would never be concurrent subscriptions to the same Flux as may be the case here, but it is good + enough. + */ + (10..11) * mockReceiver.reportProgress(_) + + /* + We should never report more progress than the 60 total (30 from each Flux--Resubscribing is a retry and + therefore rewinds). + */ + 0 * mockReceiver.reportProgress({it > 60}) + } + + // See TransferManagerTest for network tests of the parallel ProgressReporter. +} diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RequestRetryTestFactory.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RequestRetryTestFactory.java new file mode 100644 index 0000000000000..a0b3c2d69e847 --- /dev/null +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RequestRetryTestFactory.java @@ -0,0 +1,430 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob; + +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpHeaders; +import com.azure.core.http.HttpMethod; +import com.azure.core.http.HttpPipelineBuilder; +import com.azure.core.http.HttpRequest; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.ProxyOptions; +import com.azure.core.implementation.http.UrlBuilder; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RequestRetryPolicy; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import reactor.core.Disposable; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.Charset; +import java.time.Duration; +import java.time.OffsetDateTime; +import java.time.temporal.ChronoUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; + +import static java.lang.StrictMath.pow; + +class RequestRetryTestFactory { + static final int RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS = 1; + + static final int RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES = 2; + + static final int RETRY_TEST_SCENARIO_NON_RETRYABLE = 3; + + static final int RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY = 4; + + static final int RETRY_TEST_SCENARIO_NETWORK_ERROR = 5; + + static final int RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING = 6; + + static final int RETRY_TEST_SCENARIO_FIXED_TIMING = 7; + + static final int RETRY_TEST_SCENARIO_TRY_TIMEOUT = 8; + + static final int RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE = 9; + + // Cancelable + + static final String RETRY_TEST_PRIMARY_HOST = "PrimaryDC"; + + static final String RETRY_TEST_SECONDARY_HOST = "SecondaryDC"; + static final ByteBuf RETRY_TEST_DEFAULT_DATA = Unpooled.wrappedBuffer("Default data".getBytes()); + private static final String RETRY_TEST_HEADER = "TestHeader"; + private static final String 
RETRY_TEST_QUERY_PARAM = "TestQueryParam"; + private static final Mono RETRY_TEST_OK_RESPONSE = Mono.just(new RetryTestResponse(200)); + + /* + We wrap the response in a StorageErrorException to mock the HttpClient. Any responses that the HttpClient receives + that is not an expected response is wrapped in a StorageErrorException. + */ + private static final Mono RETRY_TEST_TEMPORARY_ERROR_RESPONSE = Mono.just(new RetryTestResponse(503)); + + private static final Mono RETRY_TEST_TIMEOUT_ERROR_RESPONSE = Mono.just(new RetryTestResponse(500)); + + private static final Mono RETRY_TEST_NON_RETRYABLE_ERROR = Mono.just(new RetryTestResponse(400)); + + private static final Mono RETRY_TEST_NOT_FOUND_RESPONSE = Mono.just(new RetryTestResponse(404)); + + private int retryTestScenario; + + private RequestRetryOptions options; + + /* + It is atypical and not recommended to have mutable state on the factory itself. However, the tests will need to + be able to validate the number of tries, and the tests will not have access to the policies, so we break our own + rule here. + */ + private int tryNumber; + + private OffsetDateTime time; + + RequestRetryTestFactory(int scenario, RequestRetryOptions options) { + this.retryTestScenario = scenario; + this.options = options; + } + + Mono send(URL url) { + return new HttpPipelineBuilder() + .policies(new RequestRetryPolicy(this.options)) + .httpClient(new RetryTestClient(this)) + .build() + .send(new HttpRequest(HttpMethod.GET, url).body(Flux.just(RETRY_TEST_DEFAULT_DATA))); + } + + int getTryNumber() { + return this.tryNumber; + } + + // The retry factory only really cares about the status code. 
+ private static final class RetryTestResponse extends HttpResponse { + int statusCode; + + RetryTestResponse(int statusCode) { + this.statusCode = statusCode; + } + + @Override + public int statusCode() { + return this.statusCode; + } + + @Override + public String headerValue(String headerName) { + return null; + } + + @Override + public HttpHeaders headers() { + return null; + } + + @Override + public Flux body() { + return null; + } + + @Override + public Mono bodyAsByteArray() { + return null; + } + + @Override + public Mono bodyAsString() { + return null; + } + + @Override + public Mono bodyAsString(Charset charset) { + return null; + } + } + + private final class RetryTestClient implements HttpClient { + private RequestRetryTestFactory factory; + + RetryTestClient(RequestRetryTestFactory parent) { + this.factory = parent; + } + + @Override + public Mono send(HttpRequest request) { + this.factory.tryNumber++; + if (this.factory.tryNumber > this.factory.options.maxTries()) { + throw new IllegalArgumentException("Try number has exceeded max tries"); + } + + // Validate the expected preconditions for each try: The correct host is used. + String expectedHost = RETRY_TEST_PRIMARY_HOST; + if (this.factory.tryNumber % 2 == 0) { + /* + Special cases: retry until success scenario fail's on the 4th try with a 404 on the secondary, so we + never expect it to check the secondary after that. All other tests should continue to check the + secondary. + Exponential timing only tests secondary backoff once but uses the rest of the retries to hit the max + delay. 
+ */ + if (!((this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS && this.factory.tryNumber > 4) + || (this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING && this.factory.tryNumber > 2))) { + expectedHost = RETRY_TEST_SECONDARY_HOST; + } + } + + if (!request.url().getHost().equals(expectedHost)) { + throw new IllegalArgumentException("The host does not match the expected host"); + } + + /* + This policy will add test headers and query parameters. Ensure they are removed/reset for each retry. + The retry policy should be starting with a fresh copy of the request for every try. + */ + if (request.headers().value(RETRY_TEST_HEADER) != null) { + throw new IllegalArgumentException("Headers not reset."); + } + if ((request.url().getQuery() != null && request.url().getQuery().contains(RETRY_TEST_QUERY_PARAM))) { + throw new IllegalArgumentException("Query params not reset."); + } + + // Subscribe and block until all information is read to prevent a blocking on another thread exception from Reactor. + ByteBuf buf = Unpooled.buffer(); + Disposable disposable = request.body().subscribe(buf::writeBytes); + while (!disposable.isDisposed()) { + System.out.println("Waiting for Flux to finish to prevent blocking on another thread exception"); + } + if (RETRY_TEST_DEFAULT_DATA.compareTo(buf) != 0) { + throw new IllegalArgumentException(("Body not reset.")); + } + + /* + Modify the request as policies downstream of the retry policy are likely to do. These must be reset on each + try. 
+ */ + request.headers().put(RETRY_TEST_HEADER, "testheader"); + UrlBuilder builder = UrlBuilder.parse(request.url()); + builder.setQueryParameter(RETRY_TEST_QUERY_PARAM, "testquery"); + try { + request.url(builder.toURL()); + } catch (MalformedURLException e) { + throw new IllegalArgumentException("The URL has been mangled"); + } + + switch (this.factory.retryTestScenario) { + case RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS: + switch (this.factory.tryNumber) { + case 1: + /* + The timer is set with a timeout on the Mono used to make the request. If the Mono + doesn't return success fast enough, it will throw a TimeoutException. We can short circuit + the waiting by simply returning an error. We will validate the time parameter later. Here, + we just test that a timeout is retried. + */ + return Mono.error(new TimeoutException()); + case 2: + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 3: + return RETRY_TEST_TIMEOUT_ERROR_RESPONSE; + case 4: + /* + By returning 404 when we should be testing against the secondary, we exercise the logic + that should prevent further tries to secondary when the secondary evidently doesn't have the + data. + */ + return RETRY_TEST_NOT_FOUND_RESPONSE; + case 5: + // Just to get to a sixth try where we ensure we should not be trying the secondary again. 
+ return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 6: + return RETRY_TEST_OK_RESPONSE; + default: + throw new IllegalArgumentException("Continued trying after success."); + } + + case RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES: + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + + case RETRY_TEST_SCENARIO_NON_RETRYABLE: + if (this.factory.tryNumber == 1) { + return RETRY_TEST_NON_RETRYABLE_ERROR; + } else { + throw new IllegalArgumentException("Continued trying after non retryable error."); + } + + case RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY: + switch (this.factory.tryNumber) { + case 1: + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 2: + return RETRY_TEST_NON_RETRYABLE_ERROR; + default: + throw new IllegalArgumentException("Continued trying after non retryable error."); + } + + case RETRY_TEST_SCENARIO_NETWORK_ERROR: + switch (this.factory.tryNumber) { + case 1: + // fall through + case 2: + return Mono.error(new IOException()); + case 3: + return RETRY_TEST_OK_RESPONSE; + default: + throw new IllegalArgumentException("Continued retrying after success."); + } + + case RETRY_TEST_SCENARIO_TRY_TIMEOUT: + switch (this.factory.tryNumber) { + case 1: + case 2: + return RETRY_TEST_OK_RESPONSE.delaySubscription(Duration.ofSeconds(options.tryTimeout() + 1)); + case 3: + return RETRY_TEST_OK_RESPONSE.delaySubscription(Duration.ofSeconds(options.tryTimeout() - 1)); + default: + throw new IllegalArgumentException("Continued retrying after success"); + } + + case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: + switch (this.factory.tryNumber) { + case 1: + this.factory.time = OffsetDateTime.now(); + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 2: + /* + Calculation for secondary is always the same, so we don't need to keep testing it. Not + trying the secondary any more will also speed up the test. 
+ */ + return testDelayBounds(1, false, RETRY_TEST_NOT_FOUND_RESPONSE); + case 3: + return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 4: + return testDelayBounds(3, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 5: + /* + With the current configuration in RetryTest, the maxRetryDelay should be reached upon the + fourth try to the primary. + */ + return testMaxDelayBounds(RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 6: + return testMaxDelayBounds(RETRY_TEST_OK_RESPONSE); + default: + throw new IllegalArgumentException("Max retries exceeded/continued retrying after success"); + } + + case RETRY_TEST_SCENARIO_FIXED_TIMING: + switch (this.factory.tryNumber) { + case 1: + this.factory.time = OffsetDateTime.now(); + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 2: + return testDelayBounds(1, false, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 3: + return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 4: + /* + Fixed backoff means it's always the same and we never hit the max, no need to keep testing. + */ + return RETRY_TEST_OK_RESPONSE; + default: + throw new IllegalArgumentException("Retries continued after success."); + } + + case RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE: + switch (this.factory.tryNumber) { + case 1: + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 2: + return Mono.error(new UnexpectedLengthException("Unexpected length", 5, 6)); + default: + throw new IllegalArgumentException("Retries continued on non retryable error."); + } + default: + throw new IllegalArgumentException("Invalid retry test scenario."); + } + } + + @Override + public HttpClient proxy(Supplier supplier) { + return null; + } + + @Override + public HttpClient wiretap(boolean b) { + return null; + } + + @Override + public HttpClient port(int i) { + return null; + } + + /* + Calculate the delay in seconds. 
Round up to ensure we include the maximum value and some offset for the code + executing between the original calculation in the retry policy and this check. + */ + private long calcPrimaryDelay(int tryNumber) { + switch (this.factory.retryTestScenario) { + case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: + return (long) Math.ceil( + ((pow(2L, tryNumber - 1) - 1L) * this.factory.options.retryDelayInMs()) / 1000); + case RETRY_TEST_SCENARIO_FIXED_TIMING: + return (long) Math.ceil(this.factory.options.retryDelayInMs() / 1000); + default: + throw new IllegalArgumentException("Invalid test scenario"); + } + } + + private OffsetDateTime calcUpperBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { + if (tryingPrimary) { + return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 + 500, ChronoUnit.MILLIS); + } else { + return start.plus(1400, ChronoUnit.MILLIS); + } + } + + private OffsetDateTime calcLowerBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { + if (tryingPrimary) { + return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 - 500, ChronoUnit.MILLIS); + } else { + return start.plus(700, ChronoUnit.MILLIS); + } + } + + private Mono testDelayBounds(int primaryTryNumber, boolean tryingPrimary, Mono response) { + /* + We have to return a new Mono so that the calculation for time is performed at the correct time, i.e. when + the Mono is actually subscribed to. This mocks an HttpClient because the requests are made only when + the Mono is subscribed to, not when all the infrastructure around it is put in place, and we care about + the delay before the request itself. 
+ */ + return Mono.defer(() -> Mono.fromCallable(() -> { + OffsetDateTime now = OffsetDateTime.now(); + if (now.isAfter(calcUpperBound(factory.time, primaryTryNumber, tryingPrimary)) + || now.isBefore(calcLowerBound(factory.time, primaryTryNumber, tryingPrimary))) { + throw new IllegalArgumentException("Delay was not within jitter bounds"); + } + + factory.time = now; + return response.block(); + })); + } + + private Mono testMaxDelayBounds(Mono response) { + return Mono.defer(() -> Mono.fromCallable(() -> { + OffsetDateTime now = OffsetDateTime.now(); + if (now.isAfter(factory.time.plusSeconds((long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) + 1)))) { + throw new IllegalArgumentException("Max retry delay exceeded"); + } else if (now.isBefore(factory.time.plusSeconds((long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) - 1)))) { + throw new IllegalArgumentException("Retry did not delay long enough"); + } + + factory.time = now; + return response.block(); + })); + } + } +} diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RetryTest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RetryTest.groovy new file mode 100644 index 0000000000000..ab5f9aabd8726 --- /dev/null +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RetryTest.groovy @@ -0,0 +1,144 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob + + +import com.azure.core.http.HttpResponse +import com.azure.storage.common.policy.RequestRetryOptions +import com.azure.storage.common.policy.RetryPolicyType +import spock.lang.Unroll +// Tests for package-private functionality. 
+class RetryTest extends APISpec {
+    static URL retryTestURL = new URL("https://" + RequestRetryTestFactory.RETRY_TEST_PRIMARY_HOST)
+    static RequestRetryOptions retryTestOptions = new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, 6, 2,
+        1000L, 4000L, RequestRetryTestFactory.RETRY_TEST_SECONDARY_HOST)
+
+    def "Retries until success"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS, retryTestOptions)
+
+        when:
+        HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+        then:
+        response.statusCode() == 200
+        retryTestFactory.getTryNumber() == 6
+    }
+
+    def "Retries until max retries"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES, retryTestOptions)
+
+        when:
+        HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+        then:
+        response.statusCode() == 503
+        retryTestFactory.getTryNumber() == retryTestOptions.maxTries()
+    }
+
+    def "Retries non retryable"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_RETRYABLE, retryTestOptions)
+
+        when:
+        HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+        then:
+        response.statusCode() == 400
+        retryTestFactory.getTryNumber() == 1
+    }
+
+    def "Retries non retryable secondary"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY, retryTestOptions)
+
+        when:
+        HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+        then:
+        response.statusCode() == 400
+        retryTestFactory.getTryNumber() == 2
+    }
+
+    def "Retries network error"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_NETWORK_ERROR, retryTestOptions)
+
+        when:
+        HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+        then:
+        response.statusCode() == 200
+        retryTestFactory.getTryNumber() == 3
+    }
+
+    def "Retries try timeout"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_TRY_TIMEOUT, retryTestOptions)
+
+        when:
+        HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+        then:
+        response.statusCode() == 200
+        retryTestFactory.getTryNumber() == 3
+    }
+
+    def "Retries exponential delay"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING, retryTestOptions)
+
+        when:
+        HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+        then:
+        response.statusCode() == 200
+        retryTestFactory.getTryNumber() == 6
+    }
+
+    def "Retries fixed delay"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_FIXED_TIMING, retryTestOptions)
+
+        when:
+        HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+        then:
+        response.statusCode() == 200
+        retryTestFactory.getTryNumber() == 4
+    }
+
+    def "Retries non replayable flux"() {
+        setup:
+        RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE, retryTestOptions)
+
+        when:
+        retryTestFactory.send(retryTestURL).block()
+
+        then:
+        def e = thrown(IllegalStateException)
+        e.getMessage().startsWith("The request failed because")
+        e.getCause() instanceof UnexpectedLengthException
+    }
+
+    @Unroll
+    def "Retries options invalid"() {
+        when:
+        new RequestRetryOptions(null, maxTries, tryTimeout, retryDelayInMs, maxRetryDelayInMs, null)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        where:
+        maxTries | tryTimeout | retryDelayInMs | maxRetryDelayInMs
+        0        | null       | null           | null
+        null     | 0          | null           | null
+        null     | null       | 0              | 1
+        null     | null       | 1              | 0
+        null     | null       | null           | 1
+        null     | null       | 1              | null
+        null     | null       | 5              | 4
+    }
+}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/SASTest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/SASTest.groovy
index 705453b54968d..9a7cab385f2e6 100644
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/SASTest.groovy
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/SASTest.groovy
@@ -3,10 +3,10 @@
 package com.azure.storage.blob
 
+import com.azure.core.http.policy.HttpLogDetailLevel
 import com.azure.storage.blob.models.AccessPolicy
 import com.azure.storage.blob.models.BlobRange
 import com.azure.storage.blob.models.SignedIdentifier
-import com.azure.storage.blob.models.StorageErrorCode
 import com.azure.storage.blob.models.UserDelegationKey
 import com.azure.storage.common.credentials.SASTokenCredential
 import com.azure.storage.common.credentials.SharedKeyCredential
@@ -17,24 +17,10 @@ import java.time.OffsetDateTime
 import java.time.ZoneOffset
 
 class SASTest extends APISpec {
-
-    def "responseError"() {
-        when:
-        cu.listBlobsFlat()
-
-        then:
-        def e = thrown(StorageException)
-        e.errorCode() == StorageErrorCode.INVALID_QUERY_PARAMETER_VALUE
-        e.statusCode() == 400
-        e.message().contains("Value for one of the query parameters specified in the request URI is invalid.")
-        e.getMessage().contains("