diff --git a/eng/config.json b/eng/config.json index fac75e631dc9..070b0daf25b0 100644 --- a/eng/config.json +++ b/eng/config.json @@ -32,6 +32,10 @@ "Name": "azqueue", "CoverageGoal": 0.60 }, + { + "Name": "azdatalake", + "CoverageGoal": 0.60 + }, { "Name": "azfile", "CoverageGoal": 0.75 diff --git a/sdk/storage/azdatalake/CHANGELOG.md b/sdk/storage/azdatalake/CHANGELOG.md new file mode 100644 index 000000000000..5ed8f538c47e --- /dev/null +++ b/sdk/storage/azdatalake/CHANGELOG.md @@ -0,0 +1,7 @@ +# Release History + +## 0.1.0-beta.1 (2023-08-16) + +### Features Added + +* This is the initial preview release of the `azdatalake` library diff --git a/sdk/storage/azdatalake/LICENSE.txt b/sdk/storage/azdatalake/LICENSE.txt new file mode 100644 index 000000000000..d1ca00f20a89 --- /dev/null +++ b/sdk/storage/azdatalake/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/sdk/storage/azdatalake/README.md b/sdk/storage/azdatalake/README.md new file mode 100644 index 000000000000..cf1600dd706c --- /dev/null +++ b/sdk/storage/azdatalake/README.md @@ -0,0 +1,280 @@ +# ADLS Gen2 Storage SDK for Go + +> Service Version: 2020-10-02 + +Azure Data Lake Storage Gen2 (ADLS Gen2) is Microsoft's hierarchical object storage solution for the cloud with converged capabilities with Azure Blob Storage. +For example, Data Lake Storage Gen2 provides file system semantics, file-level security, and scale. +Because these capabilities are built on Blob storage, you also get low-cost, tiered storage, with high availability/disaster recovery capabilities. +ADLS Gen2 makes Azure Storage the foundation for building enterprise data lakes on Azure. +Designed from the start to service multiple petabytes of information while sustaining hundreds of gigabits of throughput, ADLS Gen2 allows you to easily manage massive amounts of data. + +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] + +## Getting started + +### Install the package + +Install the ADLS Gen2 Storage SDK for Go with [go get][goget]: + +```Powershell +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake +``` + +If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module. +```Powershell +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +### Prerequisites + +A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). + +You need an [Azure subscription][azure_sub] and a +[Storage Account][storage_account_docs] to use this package. 
+ +To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. +Here's an example using the Azure CLI: + +```Powershell +az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS +``` + +### Authenticate the client + +In order to interact with the ADLS Gen2 Storage service, you'll need to create an instance of the `Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. + +```go +// create a credential for authenticating with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle err + +// create a service.Client for the specified storage account that uses the above credential +client, err := service.NewClient("https://MYSTORAGEACCOUNT.dfs.core.windows.net/", cred, nil) +// TODO: handle err +// you can also create filesystem, file and directory clients +``` + +Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps). + +## Key concepts + +ADLS Gen2 provides: +- Hadoop-compatible access +- Hierarchical directory structure +- Optimized cost and performance +- Finer grain security model +- Massive scalability + +ADLS Gen2 storage is designed for: + +- Serving images or documents directly to a browser. +- Storing files for distributed access. +- Streaming video and audio. +- Writing to log files. +- Storing data for backup and restore, disaster recovery, and archiving. +- Storing data for analysis by an on-premises or Azure-hosted service. 
+ +ADLS Gen2 storage offers three types of resources: + +- The _storage account_ +- One or more _filesystems_ in a storage account +- One or more _files_ or _directories_ in a filesystem + +Instances of the `Client` type provide methods for manipulating filesystems and paths within a storage account. +The storage account is specified when the `Client` is constructed. The clients available are referenced below. +Use the appropriate client constructor function for the authentication mechanism you wish to use. + +### Goroutine safety +We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. + +### About metadata +ADLS Gen2 metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. 
+ +### Additional concepts + +[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) | +[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) | +[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) | +[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log) + + +## Examples + +### Creating and uploading a file (assuming filesystem exists) + +```go +const ( + path = "https://MYSTORAGEACCOUNT.dfs.core.windows.net/sample-fs/sample-file" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := file.NewClient(path, cred, nil) +// TODO: handle error + +_, err = client.Create(context.TODO(), nil) +// TODO: handle error + +// open the file for reading +fh, err := os.OpenFile(sampleFile, os.O_RDONLY, 0) +// TODO: handle error +defer fh.Close() + +// upload the file to the specified filesystem with the specified file name +_, err = client.UploadFile(context.TODO(), fh, nil) +// TODO: handle error +``` + +### Downloading a file + +```go +const ( + path = "https://MYSTORAGEACCOUNT.dfs.core.windows.net/sample-fs/cloud.jpg" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := file.NewClient(path, cred, nil) +// TODO: handle error + +// create or open a local file where we can download the file +file, err := os.Create("cloud.jpg") +// TODO: handle error +defer file.Close() + +// download the file +_, err = client.DownloadFile(context.TODO(), file, nil) +// TODO: handle error +``` + +### Creating and deleting a filesystem + +```go +const ( + fs = "https://MYSTORAGEACCOUNT.dfs.core.windows.net/sample-fs" +) + +// 
authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := filesystem.NewClient(fs, cred, nil) +// TODO: handle error + +_, err = client.Create(context.TODO(), nil) +// TODO: handle error + +_, err = client.Delete(context.TODO(), nil) +// TODO: handle error +``` + +### Enumerating paths (assuming filesystem exists) + +```go +const ( + fs = "https://MYSTORAGEACCOUNT.dfs.core.windows.net/sample-fs" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a filesystem client for the specified storage account +client, err := filesystem.NewClient(fs, cred, nil) +// TODO: handle error + +// path listings are returned across multiple pages +pager := client.NewListPathsPager(true, nil) + +// continue fetching pages until no more remain +for pager.More() { + // advance to the next page + page, err := pager.NextPage(context.TODO()) + // TODO: handle error + + // print the path names for this page + for _, path := range page.PathList.Paths { + fmt.Println(*path.Name) + fmt.Println(*path.IsDirectory) + } +} +``` + +## Troubleshooting + +All Datalake service operations will return an +[*azcore.ResponseError][azcore_response_error] on failure with a +populated `ErrorCode` field. Many of these errors are recoverable. +The [datalakeerror][datalake_error] package provides the possible Storage error codes +along with various helper facilities for error handling. + + +### Specialized clients + +The ADLS Gen2 Storage SDK for Go provides specialized clients in various subpackages. + +The [file][file] package contains APIs related to file path types. + +The [directory][directory] package contains APIs related to directory path types. + +The [lease][lease] package contains clients for managing leases on paths (paths represent both directory and file paths) and filesystems. 
Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. + +The [filesystem][filesystem] package contains APIs specific to filesystems. This includes APIs setting access policies or properties, and more. + +The [service][service] package contains APIs specific to Datalake service. This includes APIs for manipulating filesystems, retrieving account information, and more. + +The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens. +See the package's documentation for more information. + + +You can find additional context and examples in our samples for each subpackage (named examples_test.go). + +## Contributing + +See the [Storage CONTRIBUTING.md][storage_contrib] for details on building, +testing, and contributing to this library. + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. For +details, visit [cla.microsoft.com][cla]. + +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. +For more information see the [Code of Conduct FAQ][coc_faq] +or contact [opencode@microsoft.com][coc_contact] with any +additional questions or comments. 
+ + +[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azdatalake +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake +[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/data-lake-storage-gen2 +[godevdl]: https://go.dev/dl/ +[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them +[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_sub]: https://azure.microsoft.com/free/ +[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad +[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError +[datalake_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azdatalake/datalakeerror/error_codes.go +[filesystem]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azdatalake/filesystem/client.go +[lease]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azdatalake/lease +[file]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azdatalake/file/client.go +[directory]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azdatalake/directory/client.go +[sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azdatalake/sas +[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azdatalake/service/client.go +[storage_contrib]: 
https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md +[cla]: https://cla.microsoft.com +[coc]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com diff --git a/sdk/storage/azdatalake/assets.json b/sdk/storage/azdatalake/assets.json new file mode 100644 index 000000000000..6794bca42de8 --- /dev/null +++ b/sdk/storage/azdatalake/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/storage/azdatalake", + "Tag": "go/storage/azdatalake_c3c16cffab" +} \ No newline at end of file diff --git a/sdk/storage/azdatalake/ci.yml b/sdk/storage/azdatalake/ci.yml new file mode 100644 index 000000000000..612cde201ffa --- /dev/null +++ b/sdk/storage/azdatalake/ci.yml @@ -0,0 +1,28 @@ +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azdatalake + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azdatalake + + +stages: + - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'storage/azdatalake' + RunLiveTests: true \ No newline at end of file diff --git a/sdk/storage/azdatalake/common.go b/sdk/storage/azdatalake/common.go new file mode 100644 index 000000000000..6baefa3c6857 --- /dev/null +++ b/sdk/storage/azdatalake/common.go @@ -0,0 +1,81 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package azdatalake + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange + +// ===================================== LEASE CONSTANTS ============================================================ + +// StatusType defines values for StatusType +type StatusType = lease.StatusType + +const ( + StatusTypeLocked StatusType = lease.StatusTypeLocked + StatusTypeUnlocked StatusType = lease.StatusTypeUnlocked +) + +// PossibleStatusTypeValues returns the possible values for the StatusType const type. 
+func PossibleStatusTypeValues() []StatusType { + return lease.PossibleStatusTypeValues() +} + +// DurationType defines values for DurationType +type DurationType = lease.DurationType + +const ( + DurationTypeInfinite DurationType = lease.DurationTypeInfinite + DurationTypeFixed DurationType = lease.DurationTypeFixed +) + +// PossibleDurationTypeValues returns the possible values for the DurationType const type. +func PossibleDurationTypeValues() []DurationType { + return lease.PossibleDurationTypeValues() +} + +// StateType defines values for StateType +type StateType = lease.StateType + +const ( + StateTypeAvailable StateType = lease.StateTypeAvailable + StateTypeLeased StateType = lease.StateTypeLeased + StateTypeExpired StateType = lease.StateTypeExpired + StateTypeBreaking StateType = lease.StateTypeBreaking + StateTypeBroken StateType = lease.StateTypeBroken +) + +// PossibleStateTypeValues returns the possible values for the StateType const type. +func PossibleStateTypeValues() []StateType { + return lease.PossibleStateTypeValues() +} diff --git a/sdk/storage/azdatalake/datalakeerror/error_codes.go b/sdk/storage/azdatalake/datalakeerror/error_codes.go new file mode 100644 index 000000000000..0c703a242606 --- /dev/null +++ b/sdk/storage/azdatalake/datalakeerror/error_codes.go @@ -0,0 +1,179 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package datalakeerror + +import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
+func HasCode(err error, codes ...StorageErrorCode) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// StorageErrorCode - Error codes returned by the service +type StorageErrorCode string + +// dfs errors +const ( + ContentLengthMustBeZero StorageErrorCode = "ContentLengthMustBeZero" + InvalidFlushPosition StorageErrorCode = "InvalidFlushPosition" + InvalidPropertyName StorageErrorCode = "InvalidPropertyName" + InvalidSourceURI StorageErrorCode = "InvalidSourceUri" + UnsupportedRestVersion StorageErrorCode = "UnsupportedRestVersion" + RenameDestinationParentPathNotFound StorageErrorCode = "RenameDestinationParentPathNotFound" + SourcePathNotFound StorageErrorCode = "SourcePathNotFound" + DestinationPathIsBeingDeleted StorageErrorCode = "DestinationPathIsBeingDeleted" + InvalidDestinationPath StorageErrorCode = "InvalidDestinationPath" + InvalidRenameSourcePath StorageErrorCode = "InvalidRenameSourcePath" + InvalidSourceOrDestinationResourceType StorageErrorCode = "InvalidSourceOrDestinationResourceType" + LeaseIsAlreadyBroken StorageErrorCode = "LeaseIsAlreadyBroken" + LeaseNameMismatch StorageErrorCode = "LeaseNameMismatch" + PathConflict StorageErrorCode = "PathConflict" + SourcePathIsBeingDeleted StorageErrorCode = "SourcePathIsBeingDeleted" +) + +// (converted) blob errors - these errors are what we expect after we do a replace on the error string using the ConvertBlobError function +const ( + AccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" + AccountBeingCreated StorageErrorCode = "AccountBeingCreated" + AccountIsDisabled StorageErrorCode = "AccountIsDisabled" + AppendPositionConditionNotMet StorageErrorCode = "AppendPositionConditionNotMet" + AuthenticationFailed StorageErrorCode = "AuthenticationFailed" + AuthorizationFailure StorageErrorCode = "AuthorizationFailure" + 
AuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + PathAlreadyExists StorageErrorCode = "PathAlreadyExists" + PathArchived StorageErrorCode = "PathArchived" + PathBeingRehydrated StorageErrorCode = "PathBeingRehydrated" + PathImmutableDueToPolicy StorageErrorCode = "PathImmutableDueToPolicy" + PathNotArchived StorageErrorCode = "PathNotArchived" + PathNotFound StorageErrorCode = "PathNotFound" + PathOverwritten StorageErrorCode = "PathOverwritten" + PathTierInadequateForContentLength StorageErrorCode = "PathTierInadequateForContentLength" + PathUsesCustomerSpecifiedEncryption StorageErrorCode = "PathUsesCustomerSpecifiedEncryption" + BlockCountExceedsLimit StorageErrorCode = "BlockCountExceedsLimit" + BlockListTooLong StorageErrorCode = "BlockListTooLong" + CannotChangeToLowerTier StorageErrorCode = "CannotChangeToLowerTier" + CannotVerifyCopySource StorageErrorCode = "CannotVerifyCopySource" + ConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + ConditionNotMet StorageErrorCode = "ConditionNotMet" + FileSystemAlreadyExists StorageErrorCode = "FileSystemAlreadyExists" + FileSystemBeingDeleted StorageErrorCode = "FileSystemBeingDeleted" + FileSystemDisabled StorageErrorCode = "FileSystemDisabled" + FileSystemNotFound StorageErrorCode = "FileSystemNotFound" + ContentLengthLargerThanTierLimit StorageErrorCode = "ContentLengthLargerThanTierLimit" + CopyAcrossAccountsNotSupported StorageErrorCode = "CopyAcrossAccountsNotSupported" + CopyIDMismatch StorageErrorCode = "CopyIdMismatch" + EmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + FeatureVersionMismatch 
StorageErrorCode = "FeatureVersionMismatch" + IncrementalCopyPathMismatch StorageErrorCode = "IncrementalCopyPathMismatch" + IncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + IncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" + InfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" + InsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + InternalError StorageErrorCode = "InternalError" + InvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + InvalidPathOrBlock StorageErrorCode = "InvalidPathOrBlock" + InvalidPathTier StorageErrorCode = "InvalidPathTier" + InvalidPathType StorageErrorCode = "InvalidPathType" + InvalidBlockID StorageErrorCode = "InvalidBlockId" + InvalidBlockList StorageErrorCode = "InvalidBlockList" + InvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + InvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + InvalidInput StorageErrorCode = "InvalidInput" + InvalidMD5 StorageErrorCode = "InvalidMd5" + InvalidMetadata StorageErrorCode = "InvalidMetadata" + InvalidOperation StorageErrorCode = "InvalidOperation" + InvalidPageRange StorageErrorCode = "InvalidPageRange" + InvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + InvalidRange StorageErrorCode = "InvalidRange" + InvalidResourceName StorageErrorCode = "InvalidResourceName" + InvalidSourcePathType StorageErrorCode = "InvalidSourcePathType" + InvalidSourcePathURL StorageErrorCode = "InvalidSourcePathUrl" + InvalidURI StorageErrorCode = "InvalidUri" + InvalidVersionForPagePathOperation StorageErrorCode = "InvalidVersionForPagePathOperation" + InvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + InvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + LeaseAlreadyBroken StorageErrorCode = "LeaseAlreadyBroken" + LeaseAlreadyPresent StorageErrorCode = 
"LeaseAlreadyPresent" + LeaseIDMismatchWithPathOperation StorageErrorCode = "LeaseIdMismatchWithPathOperation" + LeaseIDMismatchWithFileSystemOperation StorageErrorCode = "LeaseIdMismatchWithFileSystemOperation" + LeaseIDMismatchWithLeaseOperation StorageErrorCode = "LeaseIdMismatchWithLeaseOperation" + LeaseIDMissing StorageErrorCode = "LeaseIdMissing" + LeaseIsBreakingAndCannotBeAcquired StorageErrorCode = "LeaseIsBreakingAndCannotBeAcquired" + LeaseIsBreakingAndCannotBeChanged StorageErrorCode = "LeaseIsBreakingAndCannotBeChanged" + LeaseIsBrokenAndCannotBeRenewed StorageErrorCode = "LeaseIsBrokenAndCannotBeRenewed" + LeaseLost StorageErrorCode = "LeaseLost" + LeaseNotPresentWithPathOperation StorageErrorCode = "LeaseNotPresentWithPathOperation" + LeaseNotPresentWithFileSystemOperation StorageErrorCode = "LeaseNotPresentWithFileSystemOperation" + LeaseNotPresentWithLeaseOperation StorageErrorCode = "LeaseNotPresentWithLeaseOperation" + MD5Mismatch StorageErrorCode = "Md5Mismatch" + CRC64Mismatch StorageErrorCode = "Crc64Mismatch" + MaxPathSizeConditionNotMet StorageErrorCode = "MaxPathSizeConditionNotMet" + MetadataTooLarge StorageErrorCode = "MetadataTooLarge" + MissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + MissingRequiredHeader StorageErrorCode = "MissingRequiredHeader" + MissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + MissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + NoAuthenticationInformation StorageErrorCode = "NoAuthenticationInformation" + NoPendingCopyOperation StorageErrorCode = "NoPendingCopyOperation" + OperationNotAllowedOnIncrementalCopyPath StorageErrorCode = "OperationNotAllowedOnIncrementalCopyPath" + OperationTimedOut StorageErrorCode = "OperationTimedOut" + OutOfRangeInput StorageErrorCode = "OutOfRangeInput" + OutOfRangeQueryParameterValue StorageErrorCode = 
"OutOfRangeQueryParameterValue" + PendingCopyOperation StorageErrorCode = "PendingCopyOperation" + PreviousSnapshotCannotBeNewer StorageErrorCode = "PreviousSnapshotCannotBeNewer" + PreviousSnapshotNotFound StorageErrorCode = "PreviousSnapshotNotFound" + PreviousSnapshotOperationNotSupported StorageErrorCode = "PreviousSnapshotOperationNotSupported" + RequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + RequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + ResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + ResourceNotFound StorageErrorCode = "ResourceNotFound" + ResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + SequenceNumberConditionNotMet StorageErrorCode = "SequenceNumberConditionNotMet" + SequenceNumberIncrementTooLarge StorageErrorCode = "SequenceNumberIncrementTooLarge" + ServerBusy StorageErrorCode = "ServerBusy" + SnapshotCountExceeded StorageErrorCode = "SnapshotCountExceeded" + SnapshotOperationRateExceeded StorageErrorCode = "SnapshotOperationRateExceeded" + SnapshotsPresent StorageErrorCode = "SnapshotsPresent" + SourceConditionNotMet StorageErrorCode = "SourceConditionNotMet" + SystemInUse StorageErrorCode = "SystemInUse" + TargetConditionNotMet StorageErrorCode = "TargetConditionNotMet" + UnauthorizedPathOverwrite StorageErrorCode = "UnauthorizedPathOverwrite" + UnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + UnsupportedHeader StorageErrorCode = "UnsupportedHeader" + UnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + UnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. + MissingSharedKeyCredential = bloberror.MissingSharedKeyCredential + + // MissingParameters - Error is returned when at least one parameter should be set for any API. 
+ MissingParameters = errors.New("at least one parameter should be set for SetAccessControl API") +) diff --git a/sdk/storage/azdatalake/directory/client.go b/sdk/storage/azdatalake/directory/client.go new file mode 100644 index 000000000000..45c9c21b6696 --- /dev/null +++ b/sdk/storage/azdatalake/directory/client.go @@ -0,0 +1,441 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "net/http" + "net/url" + "strings" + "time" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure Datalake Storage service. +type Client base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client] + +// NewClient creates an instance of Client with the specified values. +// - directoryURL - the URL of the directory e.g. 
https://.dfs.core.windows.net/fs/dir +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(directoryURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + blobURL, directoryURL := shared.GetURLs(directoryURL) + + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.DirectoryClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobClientOpts := blockblob.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobClient, _ := blockblob.NewClient(blobURL, cred, &blobClientOpts) + dirClient := base.NewPathClient(directoryURL, blobURL, blobClient, azClient, nil, &cred, (*base.ClientOptions)(conOptions)) + + return (*Client)(dirClient), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - directoryURL - the URL of the storage account e.g. https://.dfs.core.windows.net/fs/dir? 
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(directoryURL string, options *ClientOptions) (*Client, error) {
+	blobURL, directoryURL := shared.GetURLs(directoryURL)
+
+	conOptions := shared.GetClientOptions(options)
+	// No credential: the pipeline carries no auth policy; any SAS token travels in the URL itself.
+	plOpts := runtime.PipelineOptions{}
+	base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts)
+
+	azClient, err := azcore.NewClient(shared.DirectoryClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	if options == nil {
+		options = &ClientOptions{}
+	}
+	blobClientOpts := blockblob.ClientOptions{
+		ClientOptions: options.ClientOptions,
+	}
+	// Surface the blob-client construction error instead of silently discarding it.
+	blobClient, err := blockblob.NewClientWithNoCredential(blobURL, &blobClientOpts)
+	if err != nil {
+		return nil, err
+	}
+	dirClient := base.NewPathClient(directoryURL, blobURL, blobClient, azClient, nil, nil, (*base.ClientOptions)(conOptions))
+
+	return (*Client)(dirClient), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - directoryURL - the URL of the storage account e.g.
https://.dfs.core.windows.net/fs/dir
+// - cred - a SharedKeyCredential created with the matching storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(directoryURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	blobURL, directoryURL := shared.GetURLs(directoryURL)
+
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	plOpts := runtime.PipelineOptions{
+		PerRetry: []policy.Policy{authPolicy},
+	}
+	base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts)
+
+	azClient, err := azcore.NewClient(shared.DirectoryClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	if options == nil {
+		options = &ClientOptions{}
+	}
+	blobClientOpts := blockblob.ClientOptions{
+		ClientOptions: options.ClientOptions,
+	}
+	// The dfs shared key must be converted to its blob-endpoint equivalent for the blob client.
+	blobSharedKey, err := exported.ConvertToBlobSharedKey(cred)
+	if err != nil {
+		return nil, err
+	}
+	// Surface the blob-client construction error instead of silently discarding it.
+	blobClient, err := blockblob.NewClientWithSharedKeyCredential(blobURL, blobSharedKey, &blobClientOpts)
+	if err != nil {
+		return nil, err
+	}
+	dirClient := base.NewPathClient(directoryURL, blobURL, blobClient, azClient, cred, nil, (*base.ClientOptions)(conOptions))
+
+	return (*Client)(dirClient), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - dirPath - the path of the directory within the filesystem, e.g. dir1/dir2
+// - fsName - the name of the filesystem holding the directory
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString string, dirPath, fsName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+
+	// Normalize Windows-style separators so the segments join cleanly into the URL.
+	dirPath = strings.ReplaceAll(dirPath, "\\", "/")
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, fsName, dirPath)
+
+	// Prefer shared-key auth when the connection string carries account credentials;
+	// otherwise fall back to anonymous/SAS access.
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+// generatedDirClientWithDFS returns the generated path client bound to the dfs endpoint.
+func (d *Client) generatedDirClientWithDFS() *generated.PathClient {
+	dirClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
+	return dirClientWithDFS
+}
+
+// generatedDirClientWithBlob returns the generated client bound to the blob endpoint.
+func (d *Client) generatedDirClientWithBlob() *generated_blob.BlobClient {
+	_, dirClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
+	return dirClientWithBlob
+}
+
+// blobClient returns the azblob block-blob client backing blob-endpoint operations.
+func (d *Client) blobClient() *blockblob.Client {
+	_, _, blobClient := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
+	return blobClient
+}
+
+// getClientOptions returns the options the composite client was constructed with.
+func (d *Client) getClientOptions() *base.ClientOptions {
+	return base.GetCompositeClientOptions((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
+}
+
+// sharedKey returns the shared-key credential, or nil when another auth mode is in use.
+func (d *Client) sharedKey() *exported.SharedKeyCredential {
+	return base.SharedKeyComposite((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
+}
+
+// identityCredential returns the AAD token credential, or nil when another auth mode is in use.
+func (d *Client) identityCredential()
*azcore.TokenCredential { + return base.IdentityCredentialComposite((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d)) +} + +// DFSURL returns the URL endpoint used by the Client object. +func (d *Client) DFSURL() string { + return d.generatedDirClientWithDFS().Endpoint() +} + +// BlobURL returns the URL endpoint used by the Client object. +func (d *Client) BlobURL() string { + return d.generatedDirClientWithBlob().Endpoint() +} + +// NewFileClient creates a new directory.Client object by concatenating directoryName to the end of this Client's URL. +// The new directory.Client uses the same request policy pipeline as the Client. +func (d *Client) NewFileClient(fileName string) (*file.Client, error) { + fileName = url.PathEscape(fileName) + fileURL := runtime.JoinPaths(d.DFSURL(), fileName) + newBlobURL, fileURL := shared.GetURLs(fileURL) + var newBlobClient *blockblob.Client + var err error + if d.identityCredential() != nil { + newBlobClient, err = blockblob.NewClient(newBlobURL, *d.identityCredential(), nil) + } else if d.sharedKey() != nil { + blobSharedKey, _ := exported.ConvertToBlobSharedKey(d.sharedKey()) + newBlobClient, err = blockblob.NewClientWithSharedKeyCredential(newBlobURL, blobSharedKey, nil) + } else { + newBlobClient, err = blockblob.NewClientWithNoCredential(newBlobURL, nil) + } + if err != nil { + return nil, exported.ConvertToDFSError(err) + } + return (*file.Client)(base.NewPathClient(fileURL, newBlobURL, newBlobClient, d.generatedDirClientWithDFS().InternalClient().WithClientName(shared.FileClient), d.sharedKey(), d.identityCredential(), d.getClientOptions())), nil +} + +// Create creates a new directory. 
+func (d *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + lac, mac, httpHeaders, createOpts, cpkOpts := options.format() + resp, err := d.generatedDirClientWithDFS().Create(ctx, createOpts, httpHeaders, lac, mac, nil, cpkOpts) + err = exported.ConvertToDFSError(err) + return resp, err +} + +// Delete deletes directory and any path under it. +func (d *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + lac, mac, deleteOpts := path.FormatDeleteOptions(options, true) + resp, err := d.generatedDirClientWithDFS().Delete(ctx, deleteOpts, lac, mac) + err = exported.ConvertToDFSError(err) + return resp, err +} + +// GetProperties gets the properties of a directory. +func (d *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts := path.FormatGetPropertiesOptions(options) + var respFromCtx *http.Response + ctxWithResp := runtime.WithCaptureResponse(ctx, &respFromCtx) + resp, err := d.blobClient().GetProperties(ctxWithResp, opts) + newResp := path.FormatGetPropertiesResponse(&resp, respFromCtx) + err = exported.ConvertToDFSError(err) + return newResp, err +} + +func (d *Client) renamePathInURL(newName string) (string, string, string) { + endpoint := d.DFSURL() + separator := "/" + // Find the index of the last occurrence of the separator + lastIndex := strings.LastIndex(endpoint, separator) + // Split the string based on the last occurrence of the separator + firstPart := endpoint[:lastIndex] // From the beginning of the string to the last occurrence of the separator + newBlobURL, newPathURL := shared.GetURLs(runtime.JoinPaths(firstPart, newName)) + oldURL, _ := url.Parse(d.DFSURL()) + return oldURL.Path, newPathURL, newBlobURL +} + +// Rename renames a directory. The original directory will no longer exist and the client will be stale. 
+func (d *Client) Rename(ctx context.Context, newName string, options *RenameOptions) (RenameResponse, error) { + oldURL, newPathURL, newBlobURL := d.renamePathInURL(newName) + lac, mac, smac, createOpts := path.FormatRenameOptions(options, oldURL) + var newBlobClient *blockblob.Client + var err error + if d.identityCredential() != nil { + newBlobClient, err = blockblob.NewClient(newBlobURL, *d.identityCredential(), nil) + } else if d.sharedKey() != nil { + blobSharedKey, _ := exported.ConvertToBlobSharedKey(d.sharedKey()) + newBlobClient, err = blockblob.NewClientWithSharedKeyCredential(newBlobURL, blobSharedKey, nil) + } else { + newBlobClient, err = blockblob.NewClientWithNoCredential(newBlobURL, nil) + } + if err != nil { + return RenameResponse{}, exported.ConvertToDFSError(err) + } + newDirClient := (*Client)(base.NewPathClient(newPathURL, newBlobURL, newBlobClient, d.generatedDirClientWithDFS().InternalClient().WithClientName(shared.DirectoryClient), d.sharedKey(), d.identityCredential(), d.getClientOptions())) + resp, err := newDirClient.generatedDirClientWithDFS().Create(ctx, createOpts, nil, lac, mac, smac, nil) + //return RenameResponse{ + // Response: resp, + // NewDirectoryClient: newDirClient, + //}, exported.ConvertToDFSError(err) + return path.FormatRenameResponse(&resp), exported.ConvertToDFSError(err) +} + +// SetAccessControl sets the owner, owning group, and permissions for a directory. 
+func (d *Client) SetAccessControl(ctx context.Context, options *SetAccessControlOptions) (SetAccessControlResponse, error) { + opts, lac, mac, err := path.FormatSetAccessControlOptions(options) + if err != nil { + return SetAccessControlResponse{}, err + } + resp, err := d.generatedDirClientWithDFS().SetAccessControl(ctx, opts, lac, mac) + err = exported.ConvertToDFSError(err) + return resp, err +} + +func (d *Client) setAccessControlPager(mode generated.PathSetAccessControlRecursiveMode, listOptions *generated.PathClientSetAccessControlRecursiveOptions) *runtime.Pager[generated.PathClientSetAccessControlRecursiveResponse] { + return runtime.NewPager(runtime.PagingHandler[generated.PathClientSetAccessControlRecursiveResponse]{ + More: func(page generated.PathClientSetAccessControlRecursiveResponse) bool { + return page.Continuation != nil && len(*page.Continuation) > 0 + }, + Fetcher: func(ctx context.Context, page *generated.PathClientSetAccessControlRecursiveResponse) (generated.PathClientSetAccessControlRecursiveResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = d.generatedDirClientWithDFS().SetAccessControlRecursiveCreateRequest(ctx, mode, listOptions) + err = exported.ConvertToDFSError(err) + } else { + listOptions.Continuation = page.Continuation + req, err = d.generatedDirClientWithDFS().SetAccessControlRecursiveCreateRequest(ctx, mode, listOptions) + err = exported.ConvertToDFSError(err) + } + if err != nil { + return generated.PathClientSetAccessControlRecursiveResponse{}, err + } + resp, err := d.generatedDirClientWithDFS().InternalClient().Pipeline().Do(req) + err = exported.ConvertToDFSError(err) + if err != nil { + return generated.PathClientSetAccessControlRecursiveResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return generated.PathClientSetAccessControlRecursiveResponse{}, runtime.NewResponseError(resp) + } + newResp, err := 
d.generatedDirClientWithDFS().SetAccessControlRecursiveHandleResponse(resp) + return newResp, exported.ConvertToDFSError(err) + }, + }) + +} + +func (d *Client) setAccessControlRecursiveHelper(ctx context.Context, mode generated.PathSetAccessControlRecursiveMode, listOptions *generated.PathClientSetAccessControlRecursiveOptions, options *SetAccessControlRecursiveOptions) (SetAccessControlRecursiveResponse, error) { + pager := d.setAccessControlPager(mode, listOptions) + counter := *options.MaxBatches + continueOnFailure := listOptions.ForceFlag + totalSuccessfulDirs := int32(0) + totalSuccessfulFiles := int32(0) + totalFailureCount := int32(0) + finalResponse := SetAccessControlRecursiveResponse{ + DirectoriesSuccessful: &totalSuccessfulDirs, + FilesSuccessful: &totalSuccessfulFiles, + FailureCount: &totalFailureCount, + FailedEntries: []*ACLFailedEntry{}, + } + for pager.More() && counter != 0 { + resp, err := pager.NextPage(ctx) + if err != nil { + return finalResponse, exported.ConvertToDFSError(err) + } + finalResponse.DirectoriesSuccessful = to.Ptr(*finalResponse.DirectoriesSuccessful + *resp.DirectoriesSuccessful) + finalResponse.FilesSuccessful = to.Ptr(*finalResponse.FilesSuccessful + *resp.FilesSuccessful) + finalResponse.FailureCount = to.Ptr(*finalResponse.FailureCount + *resp.FailureCount) + finalResponse.FailedEntries = append(finalResponse.FailedEntries, resp.FailedEntries...) + counter = counter - 1 + if !*continueOnFailure && *resp.FailureCount > 0 { + return finalResponse, exported.ConvertToDFSError(err) + } + } + return finalResponse, nil +} + +// SetAccessControlRecursive sets the owner, owning group, and permissions for a directory. 
+func (d *Client) SetAccessControlRecursive(ctx context.Context, ACL string, options *SetAccessControlRecursiveOptions) (SetAccessControlRecursiveResponse, error) {
+	// Default the options so format and the helper always receive a usable value.
+	if options == nil {
+		options = &SetAccessControlRecursiveOptions{}
+	}
+	recursiveMode, genOpts := options.format(ACL, "set")
+	return d.setAccessControlRecursiveHelper(ctx, recursiveMode, genOpts, options)
+}
+
+// UpdateAccessControlRecursive updates the owner, owning group, and permissions for a directory.
+func (d *Client) UpdateAccessControlRecursive(ctx context.Context, ACL string, options *UpdateAccessControlRecursiveOptions) (SetAccessControlRecursiveResponse, error) {
+	// Default the options so format and the helper always receive a usable value.
+	if options == nil {
+		options = &UpdateAccessControlRecursiveOptions{}
+	}
+	recursiveMode, genOpts := options.format(ACL, "modify")
+	return d.setAccessControlRecursiveHelper(ctx, recursiveMode, genOpts, options)
+}
+
+// RemoveAccessControlRecursive removes the owner, owning group, and permissions for a directory.
+func (d *Client) RemoveAccessControlRecursive(ctx context.Context, ACL string, options *RemoveAccessControlRecursiveOptions) (SetAccessControlRecursiveResponse, error) {
+	// Default the options so format and the helper always receive a usable value.
+	if options == nil {
+		options = &RemoveAccessControlRecursiveOptions{}
+	}
+	recursiveMode, genOpts := options.format(ACL, "remove")
+	return d.setAccessControlRecursiveHelper(ctx, recursiveMode, genOpts, options)
+}
+
+// GetAccessControl gets the owner, owning group, and permissions for a directory.
+func (d *Client) GetAccessControl(ctx context.Context, options *GetAccessControlOptions) (GetAccessControlResponse, error) {
+	opts, lac, mac := path.FormatGetAccessControlOptions(options)
+	resp, err := d.generatedDirClientWithDFS().GetProperties(ctx, opts, lac, mac)
+	return resp, exported.ConvertToDFSError(err)
+}
+
+// SetMetadata sets the metadata for a directory.
+func (d *Client) SetMetadata(ctx context.Context, metadata map[string]*string, options *SetMetadataOptions) (SetMetadataResponse, error) {
+	opts := path.FormatSetMetadataOptions(options)
+	// Metadata is serviced through the blob endpoint.
+	resp, err := d.blobClient().SetMetadata(ctx, metadata, opts)
+	err = exported.ConvertToDFSError(err)
+	return resp, err
+}
+
+// SetHTTPHeaders sets the HTTP headers for a directory.
+func (d *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, options *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) {
+	opts, blobHTTPHeaders := path.FormatSetHTTPHeadersOptions(options, httpHeaders)
+	resp, err := d.blobClient().SetHTTPHeaders(ctx, blobHTTPHeaders, opts)
+	// Reshape the blob response into the dfs response type before returning.
+	newResp := SetHTTPHeadersResponse{}
+	path.FormatSetHTTPHeadersResponse(&newResp, &resp)
+	err = exported.ConvertToDFSError(err)
+	return newResp, err
+}
+
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at directory.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (d *Client) GetSASURL(permissions sas.DirectoryPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) {
+	if d.sharedKey() == nil {
+		return "", datalakeerror.MissingSharedKeyCredential
+	}
+
+	urlParts, err := sas.ParseURL(d.BlobURL())
+	err = exported.ConvertToDFSError(err)
+	if err != nil {
+		return "", err
+	}
+
+	st := path.FormatGetSASURLOptions(o)
+
+	// Sign the SAS values with the account's shared key.
+	qps, err := sas.DatalakeSignatureValues{
+		DirectoryPath:  urlParts.PathName,
+		FileSystemName: urlParts.FileSystemName,
+		Version:        sas.Version,
+		Permissions:    permissions.String(),
+		StartTime:      st,
+		ExpiryTime:     expiry.UTC(),
+	}.SignWithSharedKey(d.sharedKey())
+
+	err = exported.ConvertToDFSError(err)
+	if err != nil {
+		return "", err
+	}
+
+	endpoint := d.BlobURL() + "?"
+ qps.Encode() + + return endpoint, nil +} + +// TODO: Undelete() +// TODO: ListPaths() diff --git a/sdk/storage/azdatalake/directory/client_test.go b/sdk/storage/azdatalake/directory/client_test.go new file mode 100644 index 000000000000..9ff90a56b9d4 --- /dev/null +++ b/sdk/storage/azdatalake/directory/client_test.go @@ -0,0 +1,2445 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory_test + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +var proposedLeaseIDs = []*string{to.Ptr("c820a799-76d7-4ee2-6e15-546f19325c2c"), to.Ptr("326cc5e1-746e-4af8-4811-a50e6629a8ca")} + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running datalake Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &RecordedTestSuite{}) + suite.Run(t, &UnrecordedTestSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &RecordedTestSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &RecordedTestSuite{}) + } +} + +func (s *RecordedTestSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *RecordedTestSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *UnrecordedTestSuite) BeforeTest(suite string, test string) { + +} + +func (s 
*UnrecordedTestSuite) AfterTest(suite string, test string) { + +} + +type RecordedTestSuite struct { + suite.Suite +} + +type UnrecordedTestSuite struct { + suite.Suite +} + +func (s *UnrecordedTestSuite) TestCreateDirAndDeleteWithConnectionString() { + + _require := require.New(s.T()) + testName := s.T().Name() + + connectionString, _ := testcommon.GetGenericConnectionString(testcommon.TestAccountDatalake) + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := directory.NewClientFromConnectionString(*connectionString, dirName, filesystemName, nil) + + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestBlobURLAndDFSURL() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + 
_require.Contains(dirClient.DFSURL(), ".dfs.core.windows.net/"+filesystemName+"/"+dirName) + _require.Contains(dirClient.BlobURL(), ".blob.core.windows.net/"+filesystemName+"/"+dirName) +} + +func (s *RecordedTestSuite) TestCreateDirAndDelete() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestGetAndCreateFileClient() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + fileClient, err := dirClient.NewFileClient(testcommon.GenerateFileName(testName)) + _require.Nil(err) + 
_require.NotNil(fileClient) + + _, err = fileClient.Create(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestCreateDirWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + createDirOpts := &directory.CreateOptions{ + AccessConditions: nil, + } + + resp, err = dirClient.Create(context.Background(), createDirOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateDirIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + DirName := testcommon.GenerateDirName(testName) + DirClient, err := testcommon.GetDirClient(filesystemName, DirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, DirClient) + + resp, err := DirClient.Create(context.Background(), nil) + _require.Nil(err) + 
_require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + createDirOpts := &directory.CreateOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + resp, err = DirClient.Create(context.Background(), createDirOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateDirIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + DirName := testcommon.GenerateDirName(testName) + DirClient, err := testcommon.GetDirClient(filesystemName, DirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, DirClient) + + resp, err := DirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + createDirOpts := &directory.CreateOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + resp, err = DirClient.Create(context.Background(), createDirOpts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestCreateDirIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), 
testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + createDirOpts := &directory.CreateOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + resp, err = dirClient.Create(context.Background(), createDirOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateDirIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + createDirOpts := &directory.CreateOptions{ + AccessConditions: &directory.AccessConditions{ + 
ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + resp, err = dirClient.Create(context.Background(), createDirOpts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestCreateDirIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + createDirOpts := &directory.CreateOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + + resp, err = dirClient.Create(context.Background(), createDirOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateDirIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, 
err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + createDirOpts := &directory.CreateOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }, + } + + resp, err = dirClient.Create(context.Background(), createDirOpts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestCreateDirWithNilHTTPHeaders() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createDirOpts := &directory.CreateOptions{ + HTTPHeaders: nil, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), createDirOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateDirWithHTTPHeaders() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer 
testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createDirOpts := &directory.CreateOptions{ + HTTPHeaders: &testcommon.BasicHeaders, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), createDirOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateDirWithLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &directory.CreateOptions{ + ProposedLeaseID: proposedLeaseIDs[0], + LeaseDuration: to.Ptr(int64(15)), + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + // should fail since leased + _, err = dirClient.Create(context.Background(), createFileOpts) + _require.NotNil(err) + + time.Sleep(time.Second * 15) + resp, err = dirClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateDirWithPermissions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err 
:= testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + perms := "0777" + umask := "0000" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &directory.CreateOptions{ + Permissions: &perms, + Umask: &umask, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + resp2, err := dirClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp2) + _require.Equal("rwxrwxrwx", *resp2.Permissions) +} + +func (s *RecordedTestSuite) TestCreateDirWithOwnerGroupACLUmask() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + umask := "0000" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &directory.CreateOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + Umask: &umask, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + +} + +func (s *RecordedTestSuite) TestDeleteDirWithNilAccessConditions() { + _require := 
require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + + deleteOpts := &directory.DeleteOptions{ + AccessConditions: nil, + } + + resp, err := dirClient.Delete(context.Background(), deleteOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestDeleteDirIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + deleteOpts := &directory.DeleteOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + resp1, err := dirClient.Delete(context.Background(), deleteOpts) + _require.Nil(err) + _require.NotNil(resp1) +} + 
+func (s *RecordedTestSuite) TestDeleteDirIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + deleteOpts := &directory.DeleteOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + _, err = dirClient.Delete(context.Background(), deleteOpts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDeleteDirIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + 
currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + deleteOpts := &directory.DeleteOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + _, err = dirClient.Delete(context.Background(), deleteOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestDeleteDirIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + deleteOpts := &directory.DeleteOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + _, err = dirClient.Delete(context.Background(), deleteOpts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDeleteDirIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + 
+ _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + deleteOpts := &directory.DeleteOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + + _, err = dirClient.Delete(context.Background(), deleteOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestDeleteDirIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + deleteOpts := &directory.DeleteOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }, + } + + _, err = dirClient.Delete(context.Background(), deleteOpts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlNil() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := 
testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = dirClient.SetAccessControl(context.Background(), nil) + _require.NotNil(err) + + _require.Equal(err, datalakeerror.MissingParameters) +} + +// TODO: write test that fails if you provide permissions and acls +func (s *RecordedTestSuite) TestDirSetAccessControl() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = dirClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlWithNilAccessConditions() { + 
_require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: nil, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = dirClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) 
+ opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + _, err = dirClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + _, err = dirClient.SetAccessControl(context.Background(), opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, 
s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }} + + _, err = dirClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 
-10) + + opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + _, err = dirClient.SetAccessControl(context.Background(), opts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + etag := resp.ETag + + opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + + _, err = dirClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + 
_require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }} + + _, err = dirClient.SetAccessControl(context.Background(), opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDirGetAccessControl() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + getACLResp, err := dirClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + 
_require.Equal(acl, *getACLResp.ACL) +} + +func (s *UnrecordedTestSuite) TestDirGetAccessControlWithSAS() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + // Adding SAS and options + permissions := sas.DirectoryPermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Delete: true, + } + expiry := time.Now().Add(time.Hour) + + sasURL, err := dirClient.GetSASURL(permissions, expiry, nil) + _require.Nil(err) + + dirClient2, _ := directory.NewClientWithNoCredential(sasURL, nil) + + getACLResp, err := dirClient2.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *UnrecordedTestSuite) TestDeleteWithSAS() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, 
s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + // Adding SAS and options + permissions := sas.DirectoryPermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Delete: true, + } + expiry := time.Now().Add(time.Hour) + + sasURL, err := dirClient.GetSASURL(permissions, expiry, nil) + _require.Nil(err) + + dirClient2, _ := directory.NewClientWithNoCredential(sasURL, nil) + + _, err = dirClient2.Delete(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestDirGetAccessControlWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + opts := &directory.GetAccessControlOptions{ + AccessConditions: nil, + } + + getACLResp, err := dirClient.GetAccessControl(context.Background(), opts) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirGetAccessControlIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), 
testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + opts := &directory.GetAccessControlOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + getACLResp, err := dirClient.GetAccessControl(context.Background(), opts) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirGetAccessControlIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + 
opts := &directory.GetAccessControlOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + _, err = dirClient.GetAccessControl(context.Background(), opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDirGetAccessControlIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + opts := &directory.GetAccessControlOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }} + + getACLResp, err := dirClient.GetAccessControl(context.Background(), opts) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirGetAccessControlIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + 
_require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + opts := &directory.GetAccessControlOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + _, err = dirClient.GetAccessControl(context.Background(), opts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDirGetAccessControlIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + etag := resp.ETag + + opts := &directory.GetAccessControlOptions{ + 
AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + + getACLResp, err := dirClient.GetAccessControl(context.Background(), opts) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirGetAccessControlIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + opts := &directory.GetAccessControlOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }} + + _, err = dirClient.GetAccessControl(context.Background(), opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +///===================================================================== + +func (s *RecordedTestSuite) TestDirSetAccessControlRecursive() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := 
"user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := dirClient.SetAccessControlRecursive(context.Background(), acl, nil) + _require.Nil(err) + + _require.Equal(resp1.DirectoriesSuccessful, to.Ptr(int32(1))) + _require.Equal(resp1.FilesSuccessful, to.Ptr(int32(0))) + _require.Equal(resp1.FailureCount, to.Ptr(int32(0))) + + getACLResp, err := dirClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlRecursiveWithBadContinuation() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + opts := &directory.SetAccessControlRecursiveOptions{Marker: to.Ptr("garbage")} + resp1, err := dirClient.SetAccessControlRecursive(context.Background(), acl, opts) + _require.Nil(err) + + _require.Equal(resp1.DirectoriesSuccessful, to.Ptr(int32(0))) + 
_require.Equal(resp1.FilesSuccessful, to.Ptr(int32(0))) + _require.Equal(resp1.FailureCount, to.Ptr(int32(0))) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlRecursiveWithEmptyOpts() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + opts := &directory.SetAccessControlRecursiveOptions{} + resp1, err := dirClient.SetAccessControlRecursive(context.Background(), acl, opts) + _require.Nil(err) + + _require.Equal(resp1.DirectoriesSuccessful, to.Ptr(int32(1))) + _require.Equal(resp1.FilesSuccessful, to.Ptr(int32(0))) + _require.Equal(resp1.FailureCount, to.Ptr(int32(0))) + + getACLResp, err := dirClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlRecursiveWithMaxResults() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := 
testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp1) + + fileClient, err := dirClient.NewFileClient(testcommon.GenerateFileName(testName)) + _require.Nil(err) + _require.NotNil(fileClient) + + _, err = fileClient.Create(context.Background(), nil) + _require.Nil(err) + + fileClient1, err := dirClient.NewFileClient(testcommon.GenerateFileName(testName + "1")) + _require.Nil(err) + _require.NotNil(fileClient1) + + _, err = fileClient1.Create(context.Background(), nil) + _require.Nil(err) + + opts := &directory.SetAccessControlRecursiveOptions{BatchSize: to.Ptr(int32(2)), MaxBatches: to.Ptr(int32(1)), ContinueOnFailure: to.Ptr(true), Marker: nil} + resp2, err := dirClient.SetAccessControlRecursive(context.Background(), acl, opts) + _require.Nil(err) + + // we expect only one file to have been updated not both since our batch size is 2 and max batches is 1 + _require.Equal(resp2.DirectoriesSuccessful, to.Ptr(int32(1))) + _require.Equal(resp2.FilesSuccessful, to.Ptr(int32(1))) + _require.Equal(resp2.FailureCount, to.Ptr(int32(0))) + + getACLResp, err := dirClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlRecursiveWithMaxResults2() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = 
fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp1) + + fileClient, err := dirClient.NewFileClient(testcommon.GenerateFileName(testName)) + _require.Nil(err) + _require.NotNil(fileClient) + + _, err = fileClient.Create(context.Background(), nil) + _require.Nil(err) + + fileClient1, err := dirClient.NewFileClient(testcommon.GenerateFileName(testName + "1")) + _require.Nil(err) + _require.NotNil(fileClient1) + + _, err = fileClient1.Create(context.Background(), nil) + _require.Nil(err) + + opts := &directory.SetAccessControlRecursiveOptions{ContinueOnFailure: to.Ptr(true), Marker: nil} + resp2, err := dirClient.SetAccessControlRecursive(context.Background(), acl, opts) + _require.Nil(err) + + // we expect only one file to have been updated not both since our batch size is 2 and max batches is 1 + _require.Equal(resp2.DirectoriesSuccessful, to.Ptr(int32(1))) + _require.Equal(resp2.FilesSuccessful, to.Ptr(int32(2))) + _require.Equal(resp2.FailureCount, to.Ptr(int32(0))) + + getACLResp, err := dirClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirSetAccessControlRecursiveWithMaxResults3() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + 
_, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp1) + + fileClient, err := dirClient.NewFileClient(testcommon.GenerateFileName(testName)) + _require.Nil(err) + _require.NotNil(fileClient) + + _, err = fileClient.Create(context.Background(), nil) + _require.Nil(err) + + fileClient1, err := dirClient.NewFileClient(testcommon.GenerateFileName(testName + "1")) + _require.Nil(err) + _require.NotNil(fileClient1) + + _, err = fileClient1.Create(context.Background(), nil) + _require.Nil(err) + + opts := &directory.SetAccessControlRecursiveOptions{BatchSize: to.Ptr(int32(1)), ContinueOnFailure: to.Ptr(true), Marker: nil} + resp2, err := dirClient.SetAccessControlRecursive(context.Background(), acl, opts) + _require.Nil(err) + + // we expect only one file to have been updated not both since our batch size is 2 and max batches is 1 + _require.Equal(resp2.DirectoriesSuccessful, to.Ptr(int32(1))) + _require.Equal(resp2.FilesSuccessful, to.Ptr(int32(2))) + _require.Equal(resp2.FailureCount, to.Ptr(int32(0))) + + getACLResp, err := dirClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestDirUpdateAccessControlRecursive() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + acl1 := "user::rwx,group::r--,other::r--" + defer 
testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &directory.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := dirClient.UpdateAccessControlRecursive(context.Background(), acl1, nil) + _require.Nil(err) + _require.Equal(resp1.DirectoriesSuccessful, to.Ptr(int32(1))) + + resp2, err := dirClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl1, *resp2.ACL) + +} + +func (s *RecordedTestSuite) TestDirRemoveAccessControlRecursive() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "mask," + "default:user,default:group," + + "user:ec3595d6-2c17-4696-8caa-7e139758d24a,group:ec3595d6-2c17-4696-8caa-7e139758d24a," + + "default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,default:group:ec3595d6-2c17-4696-8caa-7e139758d24a" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := dirClient.RemoveAccessControlRecursive(context.Background(), acl, nil) + _require.Nil(err) + _require.Equal(resp1.DirectoriesSuccessful, 
to.Ptr(int32(1))) +} + +func (s *RecordedTestSuite) TestDirSetMetadataWithBasicMetadata() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = dirClient.SetMetadata(context.Background(), testcommon.BasicMetadata, nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestDirSetMetadataWithAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteDir(context.Background(), _require, dirClient) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + opts := &directory.SetMetadataOptions{ + AccessConditions: 
&directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: &currentTime, + }, + }, + } + _, err = dirClient.SetMetadata(context.Background(), testcommon.BasicMetadata, opts) + _require.Nil(err) +} + +func validatePropertiesSet(_require *require.Assertions, dirClient *directory.Client, disposition string) { + resp, err := dirClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.Equal(*resp.ContentDisposition, disposition) +} + +func (s *RecordedTestSuite) TestDirSetHTTPHeaders() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = dirClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, nil) + _require.Nil(err) + validatePropertiesSet(_require, dirClient, *testcommon.BasicHeaders.ContentDisposition) +} + +func (s *RecordedTestSuite) TestDirSetHTTPHeadersWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := 
testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + opts := &directory.SetHTTPHeadersOptions{ + AccessConditions: nil, + } + + _, err = dirClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.Nil(err) + validatePropertiesSet(_require, dirClient, *testcommon.BasicHeaders.ContentDisposition) +} + +func (s *RecordedTestSuite) TestDirSetHTTPHeadersIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + opts := &directory.SetHTTPHeadersOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: &currentTime, + }, + }, + } + _, err = dirClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.Nil(err) + validatePropertiesSet(_require, dirClient, *testcommon.BasicHeaders.ContentDisposition) +} + +func (s *RecordedTestSuite) TestDirSetHTTPHeadersIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := 
testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + opts := &directory.SetHTTPHeadersOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfModifiedSince: &currentTime, + }, + }, + } + _, err = dirClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDirSetHTTPHeadersIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + opts := &directory.SetHTTPHeadersOptions{ + 
AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: &currentTime, + }, + }, + } + _, err = dirClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.Nil(err) + validatePropertiesSet(_require, dirClient, *testcommon.BasicHeaders.ContentDisposition) +} + +func (s *RecordedTestSuite) TestDirSetHTTPHeadersIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + opts := &directory.SetHTTPHeadersOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: &currentTime, + }, + }, + } + _, err = dirClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDirSetHTTPHeadersIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), 
_require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + opts := &directory.SetHTTPHeadersOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfMatch: etag, + }, + }} + _, err = dirClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.Nil(err) + validatePropertiesSet(_require, dirClient, *testcommon.BasicHeaders.ContentDisposition) +} + +func (s *RecordedTestSuite) TestDirSetHTTPHeadersIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + opts := &directory.SetHTTPHeadersOptions{ + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }, + } + _, err = dirClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func 
(s *RecordedTestSuite) TestDirRenameNoOptions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + //resp1, err := dirClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = dirClient.Rename(context.Background(), "newName", nil) + _require.Nil(err) + //_require.NotNil(resp1) + //_require.Contains(resp1.NewDirectoryClient.DFSURL(), "newName") +} + +func (s *RecordedTestSuite) TestRenameDirWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + renameFileOpts := &directory.RenameOptions{ + AccessConditions: nil, + } + + //resp1, err := dirClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = 
dirClient.Rename(context.Background(), "newName", renameFileOpts) + _require.Nil(err) + //_require.NotNil(resp1) + //_require.Contains(resp1.NewDirectoryClient.DFSURL(), "newName") +} + +func (s *RecordedTestSuite) TestRenameDirIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + renameFileOpts := &directory.RenameOptions{ + SourceAccessConditions: &directory.SourceAccessConditions{ + SourceModifiedAccessConditions: &directory.SourceModifiedAccessConditions{ + SourceIfModifiedSince: &currentTime, + }, + }, + } + //resp1, err := dirClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + _require.Nil(err) + //_require.NotNil(resp1) + //_require.Contains(resp1.NewDirectoryClient.DFSURL(), "newName") +} + +func (s *RecordedTestSuite) TestRenameDirIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = 
fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + renameFileOpts := &directory.RenameOptions{ + SourceAccessConditions: &directory.SourceAccessConditions{ + SourceModifiedAccessConditions: &directory.SourceModifiedAccessConditions{ + SourceIfModifiedSince: &currentTime, + }, + }, + } + + //_, err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.SourceConditionNotMet) +} + +func (s *RecordedTestSuite) TestRenameDirIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + renameFileOpts := &directory.RenameOptions{ + SourceAccessConditions: &directory.SourceAccessConditions{ + SourceModifiedAccessConditions: &directory.SourceModifiedAccessConditions{ + SourceIfUnmodifiedSince: &currentTime, 
+ }, + }, + } + + //resp1, err := dirClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + _require.Nil(err) + //_require.NotNil(resp1) + //_require.Contains(resp1.NewDirectoryClient.DFSURL(), "newName") +} + +func (s *RecordedTestSuite) TestRenameDirIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + renameFileOpts := &directory.RenameOptions{ + SourceAccessConditions: &directory.SourceAccessConditions{ + SourceModifiedAccessConditions: &directory.SourceModifiedAccessConditions{ + SourceIfUnmodifiedSince: &currentTime, + }, + }, + } + + //_, err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.SourceConditionNotMet) +} + +func (s *RecordedTestSuite) TestRenameDirIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer 
testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + renameFileOpts := &directory.RenameOptions{ + SourceAccessConditions: &directory.SourceAccessConditions{ + SourceModifiedAccessConditions: &directory.SourceModifiedAccessConditions{ + SourceIfMatch: etag, + }, + }, + } + + //resp1, err := dirClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + _require.Nil(err) + //_require.NotNil(resp1) + //_require.Contains(resp1.NewDirectoryClient.DFSURL(), "newName") +} + +func (s *RecordedTestSuite) TestRenameDirIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + dirName := testcommon.GenerateDirName(testName) + dirClient, err := testcommon.GetDirClient(filesystemName, dirName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + renameFileOpts := &directory.RenameOptions{ + SourceAccessConditions: &directory.SourceAccessConditions{ + SourceModifiedAccessConditions: &directory.SourceModifiedAccessConditions{ + SourceIfNoneMatch: etag, + }, + }, + } + + //_, 
err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = dirClient.Rename(context.Background(), "newName", renameFileOpts) + + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.SourceConditionNotMet) +} + +// TODO: more tests for acls diff --git a/sdk/storage/azdatalake/directory/constants.go b/sdk/storage/azdatalake/directory/constants.go new file mode 100644 index 000000000000..25bfba6e776d --- /dev/null +++ b/sdk/storage/azdatalake/directory/constants.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path" +) + +// EncryptionAlgorithmType defines values for EncryptionAlgorithmType. +type EncryptionAlgorithmType = path.EncryptionAlgorithmType + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = path.EncryptionAlgorithmTypeNone + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = path.EncryptionAlgorithmTypeAES256 +) diff --git a/sdk/storage/azdatalake/directory/examples_test.go b/sdk/storage/azdatalake/directory/examples_test.go new file mode 100644 index 000000000000..627317f8a62f --- /dev/null +++ b/sdk/storage/azdatalake/directory/examples_test.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package directory_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory" + "log" + "os" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +// make sure you create the filesystem before running this example +func Example_directory_CreateAndDelete() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a directory client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/dir1", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + dirClient, err := directory.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + _, err = dirClient.Create(context.Background(), nil) + handleError(err) + + _, err = dirClient.Delete(context.Background(), nil) + handleError(err) +} + +// This examples shows how to set a directory HTTP Headers, how to read, and how to update the directory's HTTP headers. 
+func Example_directory_HTTPHeaders() { + // make sure you create the filesystem and directory before running this example + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a dir client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/dir1", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + dirClient, err := directory.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + // Create a directory with HTTP headers + _, err = dirClient.SetHTTPHeaders(context.TODO(), directory.HTTPHeaders{ + ContentType: to.Ptr("text/html; charset=utf-8"), + ContentDisposition: to.Ptr("attachment"), + }, nil) + handleError(err) + + get, err := dirClient.GetProperties(context.TODO(), nil) + handleError(err) + + fmt.Println(get.ContentType) + fmt.Println(get.ContentDisposition) +} + +// make sure you create the filesystem and directory before running this example +func Example_dir_Client_SetMetadata() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + // Create a dir client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/dir1", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + dirClient, err := directory.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + _, err = dirClient.SetMetadata(context.TODO(), map[string]*string{"author": to.Ptr("Tamer")}, nil) + handleError(err) + + // Query the directory's properties and metadata + get, err := dirClient.GetProperties(context.TODO(), nil) + handleError(err) + + // Show the directory's metadata + if get.Metadata == nil { + log.Fatal("No metadata returned") + } + + for k, v := range get.Metadata { + fmt.Print(k + "=" + *v + "\n") + } +} + +// make sure you create the filesystem before running this example +func 
Example_directory_Rename() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a directory client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/dir1", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + dirClient, err := directory.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + _, err = dirClient.Create(context.Background(), nil) + handleError(err) + + _, err = dirClient.Rename(context.Background(), "renameDir", nil) + handleError(err) +} + +// for this example make sure to create paths within the dir so you can recursively set the ACL on them +func Example_directory_SetACLRecursive() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a directory client + acl := "user::rwx,group::r-x,other::rwx" + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/dir1", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + dirClient, err := directory.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + _, err = dirClient.SetAccessControlRecursive(context.Background(), acl, &directory.SetAccessControlRecursiveOptions{ + BatchSize: to.Ptr(int32(2)), MaxBatches: to.Ptr(int32(1)), ContinueOnFailure: to.Ptr(true), Marker: nil}) + handleError(err) +} + +func getRelativeTimeFromAnchor(anchorTime *time.Time, amount time.Duration) time.Time { + return anchorTime.Add(amount * time.Second) +} + +// make sure you create the filesystem before running this example +func Example_directory_SetAccessControlIfUnmodifiedSinceTrue() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a directory client + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := 
"user::rwx,group::r-x,other::rwx" + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/dir1", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + dirClient, err := directory.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + resp, err := dirClient.Create(context.Background(), nil) + handleError(err) + + currentTime := getRelativeTimeFromAnchor(resp.Date, 10) + opts := &directory.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &directory.AccessConditions{ + ModifiedAccessConditions: &directory.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }} + + _, err = dirClient.SetAccessControl(context.Background(), opts) + handleError(err) +} diff --git a/sdk/storage/azdatalake/directory/models.go b/sdk/storage/azdatalake/directory/models.go new file mode 100644 index 000000000000..954ee67c481d --- /dev/null +++ b/sdk/storage/azdatalake/directory/models.go @@ -0,0 +1,172 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path" +) + +// CreateOptions contains the optional parameters when calling the Create operation. +type CreateOptions struct { + // AccessConditions contains parameters for accessing the file. + AccessConditions *AccessConditions + // CPKInfo contains a group of parameters for client provided encryption key. + CPKInfo *CPKInfo + // HTTPHeaders contains the HTTP headers for path operations. 
+ HTTPHeaders *HTTPHeaders + // LeaseDuration specifies the duration of the lease, in seconds, or negative one + // (-1) for a lease that never expires. A non-infinite lease can be + // between 15 and 60 seconds. + LeaseDuration *int64 + // ProposedLeaseID specifies the proposed lease ID for the file. + ProposedLeaseID *string + // Permissions is the octal representation of the permissions for user, group and mask. + Permissions *string + // Umask is the umask for the file. + Umask *string + // Owner is the owner of the file. + Owner *string + // Group is the owning group of the file. + Group *string + // ACL is the access control list for the file. + ACL *string +} + +func (o *CreateOptions) format() (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, *generated.PathHTTPHeaders, *generated.PathClientCreateOptions, *generated.CPKInfo) { + resource := generated.PathResourceTypeDirectory + createOpts := &generated.PathClientCreateOptions{ + Resource: &resource, + } + if o == nil { + return nil, nil, nil, createOpts, nil + } + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) + createOpts.ACL = o.ACL + createOpts.Group = o.Group + createOpts.Owner = o.Owner + createOpts.Umask = o.Umask + createOpts.Permissions = o.Permissions + createOpts.ProposedLeaseID = o.ProposedLeaseID + createOpts.LeaseDuration = o.LeaseDuration + + var httpHeaders *generated.PathHTTPHeaders + var cpkOpts *generated.CPKInfo + + if o.HTTPHeaders != nil { + httpHeaders = path.FormatPathHTTPHeaders(o.HTTPHeaders) + } + if o.CPKInfo != nil { + cpkOpts = &generated.CPKInfo{ + EncryptionAlgorithm: o.CPKInfo.EncryptionAlgorithm, + EncryptionKey: o.CPKInfo.EncryptionKey, + EncryptionKeySHA256: o.CPKInfo.EncryptionKeySHA256, + } + } + return leaseAccessConditions, modifiedAccessConditions, httpHeaders, createOpts, cpkOpts +} + +// ===================================== PATH IMPORTS =========================================== + 
+// SetAccessControlRecursiveOptions contains the optional parameters when calling the SetAccessControlRecursive operation. +type accessControlRecursiveOptions struct { + // Max batch size is 2000 paths + BatchSize *int32 + // Number of recursive calls to be made to set access control. + MaxBatches *int32 + // ContinueOnFailure indicates whether to continue on failure when the operation encounters an error. + ContinueOnFailure *bool + // Marker is the continuation token to use when continuing the operation. + Marker *string +} + +func (o *accessControlRecursiveOptions) format(ACL, mode string) (generated.PathSetAccessControlRecursiveMode, *generated.PathClientSetAccessControlRecursiveOptions) { + defaultMaxBatches := to.Ptr(int32(-1)) + defaultForceFlag := to.Ptr(false) + opts := &generated.PathClientSetAccessControlRecursiveOptions{ + ACL: &ACL, + } + newMode := generated.PathSetAccessControlRecursiveMode(mode) + if o.ContinueOnFailure == nil { + opts.ForceFlag = defaultForceFlag + } else { + opts.ForceFlag = o.ContinueOnFailure + } + opts.Continuation = o.Marker + opts.MaxRecords = o.BatchSize + if o.MaxBatches == nil { + o.MaxBatches = defaultMaxBatches + } + return newMode, opts +} + +// SetAccessControlRecursiveOptions contains the optional parameters when calling the UpdateAccessControlRecursive operation. +type SetAccessControlRecursiveOptions = accessControlRecursiveOptions + +// UpdateAccessControlRecursiveOptions contains the optional parameters when calling the UpdateAccessControlRecursive operation. +type UpdateAccessControlRecursiveOptions = accessControlRecursiveOptions + +// RemoveAccessControlRecursiveOptions contains the optional parameters when calling the RemoveAccessControlRecursive operation. +type RemoveAccessControlRecursiveOptions = accessControlRecursiveOptions + +// ================================= path imports ================================== + +// DeleteOptions contains the optional parameters when calling the Delete operation. 
+type DeleteOptions = path.DeleteOptions + +// RenameOptions contains the optional parameters when calling the Rename operation. +type RenameOptions = path.RenameOptions + +// GetPropertiesOptions contains the optional parameters for the GetProperties method. +type GetPropertiesOptions = path.GetPropertiesOptions + +// SetAccessControlOptions contains the optional parameters when calling the SetAccessControl operation. +type SetAccessControlOptions = path.SetAccessControlOptions + +// GetAccessControlOptions contains the optional parameters when calling the GetAccessControl operation. +type GetAccessControlOptions = path.GetAccessControlOptions + +// CPKInfo contains a group of parameters for client provided encryption key. +type CPKInfo = path.CPKInfo + +// GetSASURLOptions contains the optional parameters for the GetSASURL method. +type GetSASURLOptions = path.GetSASURLOptions + +// SetHTTPHeadersOptions contains the optional parameters for the SetHTTPHeaders method. +type SetHTTPHeadersOptions = path.SetHTTPHeadersOptions + +// HTTPHeaders contains the HTTP headers for path operations. +type HTTPHeaders = path.HTTPHeaders + +// SetMetadataOptions provides set of configurations for SetMetadata. +type SetMetadataOptions = path.SetMetadataOptions + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = path.SharedKeyCredential + +// AccessConditions identifies path-specific access conditions which you optionally set. +type AccessConditions = path.AccessConditions + +// SourceAccessConditions identifies source path-specific access conditions which you optionally set. +type SourceAccessConditions = path.SourceAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = path.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. 
+type ModifiedAccessConditions = path.ModifiedAccessConditions + +// SourceModifiedAccessConditions contains a group of parameters for specifying source access conditions. +type SourceModifiedAccessConditions = path.SourceModifiedAccessConditions + +// CPKScopeInfo contains a group of parameters for client provided encryption scope. +type CPKScopeInfo path.CPKScopeInfo + +// ACLFailedEntry contains the failed ACL entry (response model). +type ACLFailedEntry = path.ACLFailedEntry diff --git a/sdk/storage/azdatalake/directory/responses.go b/sdk/storage/azdatalake/directory/responses.go new file mode 100644 index 000000000000..0a3cd654da49 --- /dev/null +++ b/sdk/storage/azdatalake/directory/responses.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path" +) + +// RenameResponse contains the response fields for the Rename operation. +type RenameResponse = path.RenameResponse + +type setAccessControlRecursiveResponse struct { + DirectoriesSuccessful *int32 + FailureCount *int32 + FilesSuccessful *int32 + FailedEntries []*ACLFailedEntry +} + +// SetAccessControlRecursiveResponse contains the response fields for the SetAccessControlRecursive operation. +type SetAccessControlRecursiveResponse = setAccessControlRecursiveResponse + +// UpdateAccessControlRecursiveResponse contains the response fields for the UpdateAccessControlRecursive operation. +type UpdateAccessControlRecursiveResponse = setAccessControlRecursiveResponse + +// RemoveAccessControlRecursiveResponse contains the response fields for the RemoveAccessControlRecursive operation. 
+type RemoveAccessControlRecursiveResponse = setAccessControlRecursiveResponse + +// ========================================== path imports =========================================================== + +// SetAccessControlResponse contains the response fields for the SetAccessControl operation. +type SetAccessControlResponse = path.SetAccessControlResponse + +// SetHTTPHeadersResponse contains the response from method Client.SetHTTPHeaders. +type SetHTTPHeadersResponse = path.SetHTTPHeadersResponse + +// GetAccessControlResponse contains the response fields for the GetAccessControl operation. +type GetAccessControlResponse = path.GetAccessControlResponse + +// GetPropertiesResponse contains the response fields for the GetProperties operation. +type GetPropertiesResponse = path.GetPropertiesResponse + +// SetMetadataResponse contains the response fields for the SetMetadata operation. +type SetMetadataResponse = path.SetMetadataResponse + +// CreateResponse contains the response fields for the Create operation. +type CreateResponse = path.CreateResponse + +// DeleteResponse contains the response fields for the Delete operation. +type DeleteResponse = path.DeleteResponse diff --git a/sdk/storage/azdatalake/doc.go b/sdk/storage/azdatalake/doc.go new file mode 100644 index 000000000000..50b93e555d49 --- /dev/null +++ b/sdk/storage/azdatalake/doc.go @@ -0,0 +1,218 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +/* + +Package azdatalake can access an Azure Data Lake Service Gen2 (ADLS Gen2). 
+ +The azdatalake package is capable of :- + - Creating, deleting, and querying filesystems in an account + - Creating, deleting, and querying files and directories in a filesystem + - Creating Shared Access Signature for authentication + +Types of Resources + +The azdatalake package allows you to interact with three types of resources :- + +* Azure storage accounts. +* filesystems within those storage accounts. +* files and directories within those filesystems. + +ADLS Gen2 client library for Go allows you to interact with each of these components through the use of a dedicated client object. +To create a client object, you will need the account's ADLS Gen2 service endpoint URL and a credential that allows you to access the account. + +Types of Credentials + +The clients support different forms of authentication. +The azdatalake library supports any of the `azcore.TokenCredential` interfaces, authorization via a Connection String, +or authorization with a Shared Access Signature token. + +Using a Shared Key + +To use an account shared key (aka account key or access key), provide the key as a string. +This can be found in your storage account in the Azure Portal under the "Access Keys" section. 
+ +Use the key as the credential parameter to authenticate the client: + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + serviceClient, err := service.NewClientWithSharedKey(serviceURL, cred, nil) + handle(err) + + // get the underlying dfs endpoint + fmt.Println(serviceClient.DFSURL()) + + // get the underlying blob endpoint + fmt.Println(serviceClient.BlobURL()) + +Using a Connection String + +Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. +To do this, pass the connection string to the service client's `NewClientFromConnectionString` method. +The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. + + connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" + serviceClient, err := service.NewClientFromConnectionString(connStr, nil) + +Using a Shared Access Signature (SAS) Token + +To use a shared access signature (SAS) token, provide the token at the end of your service URL. +You can generate a SAS token from the Azure Portal under Shared Access Signature or use the ServiceClient.GetSASToken() functions. 
+ + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handle(err) + serviceClient, err := service.NewClientWithSharedKey(serviceURL, cred, nil) + handle(err) + fmt.Println(serviceClient.DFSURL()) + + // Alternatively, you can create SAS on the fly + + resources := sas.AccountResourceTypes{Service: true} + permission := sas.AccountPermissions{Read: true} + start := time.Now() + expiry := start.AddDate(0, 0, 1) + serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry) + handle(err) + + serviceClientWithSAS, err := service.NewClientWithNoCredential(serviceURLWithSAS, nil) + handle(err) + + fmt.Println(serviceClientWithSAS.DFSURL()) + fmt.Println(serviceClientWithSAS.BlobURL()) + +Types of Clients + +There are three different clients provided to interact with the various components of the ADLS Gen2 Service: + +1. **`ServiceClient`** + * Get and set account settings. + * Query, create, list and delete filesystems within the account. + +2. **`FileSystemClient`** + * Get and set filesystem access settings, properties, and metadata. + * Create, delete, and query files/directories within the filesystem. + * `FileSystemLeaseClient` to support filesystem lease management. + +3. **`FileClient`** + * Get and set file properties. + * Perform CRUD operations on a given file. + * Set ACLs on a given file. + * `PathLeaseClient` to support file lease management. + +4. **`DirectoryClient`** + * Get and set directory properties. + * Perform CRUD operations on a given directory. + * Set ACLs on a given directory and recursively on all subdirectories and files. 
+ * `PathLeaseClient` to support directory lease management. + +Examples + + // Your account name and key can be obtained from the Azure Portal. + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + // The service URL for dfs endpoints is usually in the form: http(s)://.dfs.core.windows.net/ + serviceClient, err := service.NewClientWithSharedKey(fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName), cred, nil) + handle(err) + + // ===== 1. Create a filesystem ===== + + // First, create a filesystem client, and use the Create method to create a new filesystem in your account + fsClient, err := serviceClient.NewFileSystemClient("testfs") + handle(err) + + // All APIs have an options' bag struct as a parameter. + // The options' bag struct allows you to specify optional parameters such as metadata, public access types, etc. + // If you want to use the default options, pass in nil. + _, err = fsClient.Create(context.TODO(), nil) + handle(err) + + // ===== 2. Upload and Download a file ===== + uploadData := "Hello world!" + + // Create a new file from the fsClient + fileClient, err := fsClient.NewFileClient("HelloWorld.txt") + handle(err) + + + _, err = fileClient.UploadStream(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), nil) + handle(err) + + // Download the file's contents and ensure that the download worked properly + fileDownloadResponse, err := fileClient.DownloadStream(context.TODO(), nil) + handle(err) + + // Use the bytes.Buffer object to read the downloaded data. + // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. 
+ reader := fileDownloadResponse.Body(nil) + downloadData, err := io.ReadAll(reader) + handle(err) + if string(downloadData) != uploadData { + handle(errors.New("Uploaded data should be same as downloaded data")) + } + + if err = reader.Close(); err != nil { + handle(err) + return + } + + // ===== 3. List paths ===== + // List methods returns a pager object which can be used to iterate over the results of a paging operation. + // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. + // PageResponse() can be used to iterate over the results of the specific page. + // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. + + pager := fsClient.NewListPathsPager(nil) + page, err := pager.NextPage(context.TODO()) + handle(err) + + // print the path names for this page + for _, path := range page.PathList.Paths { + fmt.Println(*path.Name) + fmt.Println(*path.IsDirectory) + } + + // Delete the file. + _, err = fileClient.Delete(context.TODO(), nil) + handle(err) + + // Delete the filesystem. + _, err = fsClient.Delete(context.TODO(), nil) + handle(err) +*/ + +package azdatalake diff --git a/sdk/storage/azdatalake/file/chunkwriting.go b/sdk/storage/azdatalake/file/chunkwriting.go new file mode 100644 index 000000000000..289b042d1646 --- /dev/null +++ b/sdk/storage/azdatalake/file/chunkwriting.go @@ -0,0 +1,193 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "bytes" + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "io" + "sync" +) + +// chunkWriter provides methods to upload chunks that represent a file to a server. +// This allows us to provide a local implementation that fakes the server for hermetic testing. 
+type chunkWriter interface { + AppendData(context.Context, int64, io.ReadSeekCloser, *AppendDataOptions) (AppendDataResponse, error) + FlushData(context.Context, int64, *FlushDataOptions) (FlushDataResponse, error) +} + +// bufferManager provides an abstraction for the management of buffers. +// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm. +type bufferManager[T ~[]byte] interface { + // Acquire returns the channel that contains the pool of buffers. + Acquire() <-chan T + + // Release releases the buffer back to the pool for reuse/cleanup. + Release(T) + + // Grow grows the number of buffers, up to the predefined max. + // It returns the total number of buffers or an error. + // No error is returned if the number of buffers has reached max. + // This is called only from the reading goroutine. + Grow() (int, error) + + // Free cleans up all buffers. + Free() +} + +// copyFromReader copies a source io.Reader to file storage using concurrent uploads. +func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst chunkWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) error { + options.setDefaults() + actualSize := int64(0) + wg := sync.WaitGroup{} // Used to know when all outgoing chunks have finished processing + errCh := make(chan error, 1) // contains the first error encountered during processing + var err error + + buffers := getBufferManager(int(options.Concurrency), options.ChunkSize) + defer buffers.Free() + + // this controls the lifetime of the uploading goroutines. + // if an error is encountered, cancel() is called which will terminate all uploads. + // NOTE: the ordering is important here. cancel MUST execute before + // cleaning up the buffers so that any uploading goroutines exit first, + // releasing their buffers back to the pool for cleanup. 
+ ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // This goroutine grabs a buffer, reads from the stream into the buffer, + // then creates a goroutine to upload/stage the chunk. + for chunkNum := uint32(0); true; chunkNum++ { + var buffer T + select { + case buffer = <-buffers.Acquire(): + // got a buffer + default: + // no buffer available; allocate a new buffer if possible + if _, err := buffers.Grow(); err != nil { + return err + } + + // either grab the newly allocated buffer or wait for one to become available + buffer = <-buffers.Acquire() + } + + var n int + n, err = io.ReadFull(src, buffer) + + if n > 0 { + // some data was read, upload it + wg.Add(1) // We're posting a buffer to be sent + + // NOTE: we must pass chunkNum as an arg to our goroutine else + // it's captured by reference and can change underneath us! + go func(chunkNum uint32) { + // Upload the outgoing chunk, matching the number of bytes read + offset := int64(chunkNum) * options.ChunkSize + appendDataOpts := options.getAppendDataOptions() + actualSize += int64(len(buffer[:n])) + _, err := dst.AppendData(ctx, offset, streaming.NopCloser(bytes.NewReader(buffer[:n])), appendDataOpts) + if err != nil { + select { + case errCh <- err: + // error was set + default: + // some other error is already set + } + cancel() + } + buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now + + // signal that the chunk has been staged. + // we MUST do this after attempting to write to errCh + // to avoid it racing with the reading goroutine. + wg.Done() + }(chunkNum) + } else { + // nothing was read so the buffer is empty, send it back for reuse/clean-up. 
+ buffers.Release(buffer) + } + + if err != nil { // The reader is done, no more outgoing buffers + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + // these are expected errors, we don't surface those + err = nil + } else { + // some other error happened, terminate any outstanding uploads + cancel() + } + break + } + } + + wg.Wait() // Wait for all outgoing chunks to complete + + if err != nil { + // there was an error reading from src, favor this error over any error during staging + return err + } + + select { + case err = <-errCh: + // there was an error during staging + return err + default: + // no error was encountered + } + + // All chunks uploaded, return nil error + flushOpts := options.getFlushDataOptions() + _, err = dst.FlushData(ctx, actualSize, flushOpts) + return err +} + +// mmbPool implements the bufferManager interface. +// it uses anonymous memory mapped files for buffers. +// don't use this type directly, use newMMBPool() instead. +type mmbPool struct { + buffers chan mmb + count int + max int + size int64 +} + +func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] { + return &mmbPool{ + buffers: make(chan mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() (int, error) { + if pool.count < pool.max { + buffer, err := newMMB(pool.size) + if err != nil { + return 0, err + } + pool.buffers <- buffer + pool.count++ + } + return pool.count, nil +} + +func (pool *mmbPool) Release(buffer mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.delete() + } + pool.count = 0 +} diff --git a/sdk/storage/azdatalake/file/client.go b/sdk/storage/azdatalake/file/client.go new file mode 100644 index 000000000000..113750ae05e9 --- /dev/null +++ b/sdk/storage/azdatalake/file/client.go @@ -0,0 +1,527 @@ +//go:build go1.18 +// +build 
go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/url" + "os" + "reflect" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure Datalake Storage service. +type Client base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client] + +// NewClient creates an instance of Client with the specified values. +// - fileURL - the URL of the blob e.g. 
https://.dfs.core.windows.net/fs/file.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(fileURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + blobURL, fileURL := shared.GetURLs(fileURL) + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FileClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobClientOpts := blockblob.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobClient, _ := blockblob.NewClient(blobURL, cred, &blobClientOpts) + fileClient := base.NewPathClient(fileURL, blobURL, blobClient, azClient, nil, &cred, (*base.ClientOptions)(conOptions)) + + return (*Client)(fileClient), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - fileURL - the URL of the storage account e.g. https://.dfs.core.windows.net/fs/file.txt? 
+// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(fileURL string, options *ClientOptions) (*Client, error) { + blobURL, fileURL := shared.GetURLs(fileURL) + + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{} + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FileClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobClientOpts := blockblob.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobClient, _ := blockblob.NewClientWithNoCredential(blobURL, &blobClientOpts) + fileClient := base.NewPathClient(fileURL, blobURL, blobClient, azClient, nil, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(fileClient), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - fileURL - the URL of the storage account e.g. 
https://.dfs.core.windows.net/fs/file.txt +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(fileURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + blobURL, fileURL := shared.GetURLs(fileURL) + + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FileClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobClientOpts := blockblob.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobSharedKey, err := exported.ConvertToBlobSharedKey(cred) + if err != nil { + return nil, err + } + blobClient, _ := blockblob.NewClientWithSharedKeyCredential(blobURL, blobSharedKey, &blobClientOpts) + fileClient := base.NewPathClient(fileURL, blobURL, blobClient, azClient, cred, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(fileClient), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, filePath, fsName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + filePath = strings.ReplaceAll(filePath, "\\", "/") + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, fsName, filePath) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (f *Client) generatedFileClientWithDFS() *generated.PathClient { + //base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) + dirClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(f)) + return dirClientWithDFS +} + +func (f *Client) generatedFileClientWithBlob() *generated_blob.BlobClient { + _, dirClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(f)) + return dirClientWithBlob +} + +func (f *Client) blobClient() *blockblob.Client { + _, _, blobClient := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(f)) + return blobClient +} + +func (f *Client) sharedKey() *exported.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(f)) +} + +func (f *Client) identityCredential() *azcore.TokenCredential { + return 
base.IdentityCredentialComposite((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(f)) +} + +func (f *Client) getClientOptions() *base.ClientOptions { + return base.GetCompositeClientOptions((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(f)) +} + +// DFSURL returns the URL endpoint used by the Client object. +func (f *Client) DFSURL() string { + return f.generatedFileClientWithDFS().Endpoint() +} + +// BlobURL returns the URL endpoint used by the Client object. +func (f *Client) BlobURL() string { + return f.generatedFileClientWithBlob().Endpoint() +} + +// Create creates a new file. +func (f *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + lac, mac, httpHeaders, createOpts, cpkOpts := options.format() + resp, err := f.generatedFileClientWithDFS().Create(ctx, createOpts, httpHeaders, lac, mac, nil, cpkOpts) + err = exported.ConvertToDFSError(err) + return resp, err +} + +// Delete deletes a file. +func (f *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + lac, mac, deleteOpts := path.FormatDeleteOptions(options, false) + resp, err := f.generatedFileClientWithDFS().Delete(ctx, deleteOpts, lac, mac) + err = exported.ConvertToDFSError(err) + return resp, err +} + +// GetProperties gets the properties of a file. 
+func (f *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts := path.FormatGetPropertiesOptions(options) + var respFromCtx *http.Response + ctxWithResp := runtime.WithCaptureResponse(ctx, &respFromCtx) + resp, err := f.blobClient().GetProperties(ctxWithResp, opts) + newResp := path.FormatGetPropertiesResponse(&resp, respFromCtx) + err = exported.ConvertToDFSError(err) + return newResp, err +} + +func (f *Client) renamePathInURL(newName string) (string, string, string) { + endpoint := f.DFSURL() + separator := "/" + // Find the index of the last occurrence of the separator + lastIndex := strings.LastIndex(endpoint, separator) + // Split the string based on the last occurrence of the separator + firstPart := endpoint[:lastIndex] // From the beginning of the string to the last occurrence of the separator + newBlobURL, newPathURL := shared.GetURLs(runtime.JoinPaths(firstPart, newName)) + oldURL, _ := url.Parse(f.DFSURL()) + return oldURL.Path, newPathURL, newBlobURL +} + +// Rename renames a file. The original file will no longer exist and the client will be stale. 
+func (f *Client) Rename(ctx context.Context, newName string, options *RenameOptions) (RenameResponse, error) { + oldPathWithoutURL, newPathURL, newBlobURL := f.renamePathInURL(newName) + lac, mac, smac, createOpts := path.FormatRenameOptions(options, oldPathWithoutURL) + var newBlobClient *blockblob.Client + var err error + if f.identityCredential() != nil { + newBlobClient, err = blockblob.NewClient(newBlobURL, *f.identityCredential(), nil) + } else if f.sharedKey() != nil { + blobSharedKey, _ := exported.ConvertToBlobSharedKey(f.sharedKey()) + newBlobClient, err = blockblob.NewClientWithSharedKeyCredential(newBlobURL, blobSharedKey, nil) + } else { + newBlobClient, err = blockblob.NewClientWithNoCredential(newBlobURL, nil) + } + if err != nil { + return RenameResponse{}, exported.ConvertToDFSError(err) + } + newFileClient := (*Client)(base.NewPathClient(newPathURL, newBlobURL, newBlobClient, f.generatedFileClientWithDFS().InternalClient().WithClientName(shared.FileClient), f.sharedKey(), f.identityCredential(), f.getClientOptions())) + resp, err := newFileClient.generatedFileClientWithDFS().Create(ctx, createOpts, nil, lac, mac, smac, nil) + //return RenameResponse{ + // Response: resp, + // NewFileClient: newFileClient, + //}, exported.ConvertToDFSError(err) + return path.FormatRenameResponse(&resp), exported.ConvertToDFSError(err) +} + +// SetExpiry operation sets an expiry time on an existing file (blob2). 
func (f *Client) SetExpiry(ctx context.Context, expiryValues SetExpiryValues, o *SetExpiryOptions) (SetExpiryResponse, error) {
	// a zero-value SetExpiryValues is treated as "never expire"
	if reflect.ValueOf(expiryValues).IsZero() {
		expiryValues.ExpiryType = SetExpiryTypeNeverExpire
	}
	opts := &generated_blob.BlobClientSetExpiryOptions{}
	if expiryValues.ExpiryType != SetExpiryTypeNeverExpire {
		// only send an expiry time when the type actually expires
		opts.ExpiresOn = &expiryValues.ExpiresOn
	}
	resp, err := f.generatedFileClientWithBlob().SetExpiry(ctx, expiryValues.ExpiryType, opts)
	err = exported.ConvertToDFSError(err)
	return resp, err
}

// SetAccessControl sets the owner, owning group, and permissions for a file.
func (f *Client) SetAccessControl(ctx context.Context, options *SetAccessControlOptions) (SetAccessControlResponse, error) {
	opts, lac, mac, err := path.FormatSetAccessControlOptions(options)
	if err != nil {
		// invalid options (e.g. formatting failure) — surface before any network call
		return SetAccessControlResponse{}, err
	}
	resp, err := f.generatedFileClientWithDFS().SetAccessControl(ctx, opts, lac, mac)
	err = exported.ConvertToDFSError(err)
	return resp, err
}

// UpdateAccessControl updates the owner, owning group, and permissions for a file.
func (f *Client) UpdateAccessControl(ctx context.Context, ACL string, options *UpdateAccessControlOptions) (UpdateAccessControlResponse, error) {
	// options.format selects the recursive "modify" mode for the given ACL — TODO confirm
	opts, mode := options.format(ACL)
	resp, err := f.generatedFileClientWithDFS().SetAccessControlRecursive(ctx, mode, opts)
	err = exported.ConvertToDFSError(err)
	return resp, err
}

// GetAccessControl gets the owner, owning group, and permissions for a file.
func (f *Client) GetAccessControl(ctx context.Context, options *GetAccessControlOptions) (GetAccessControlResponse, error) {
	opts, lac, mac := path.FormatGetAccessControlOptions(options)
	// ACL data is returned via the DFS GetProperties endpoint
	resp, err := f.generatedFileClientWithDFS().GetProperties(ctx, opts, lac, mac)
	err = exported.ConvertToDFSError(err)
	return resp, err
}

// RemoveAccessControl removes the owner, owning group, and permissions for a file.
func (f *Client) RemoveAccessControl(ctx context.Context, ACL string, options *RemoveAccessControlOptions) (RemoveAccessControlResponse, error) {
	// options.format selects the recursive "remove" mode for the given ACL — TODO confirm
	opts, mode := options.format(ACL)
	resp, err := f.generatedFileClientWithDFS().SetAccessControlRecursive(ctx, mode, opts)
	err = exported.ConvertToDFSError(err)
	return resp, err
}

// SetMetadata sets the metadata for a file.
func (f *Client) SetMetadata(ctx context.Context, metadata map[string]*string, options *SetMetadataOptions) (SetMetadataResponse, error) {
	opts := path.FormatSetMetadataOptions(options)
	// metadata is stored on the blob endpoint, not the DFS endpoint
	resp, err := f.blobClient().SetMetadata(ctx, metadata, opts)
	err = exported.ConvertToDFSError(err)
	return resp, err
}

// SetHTTPHeaders sets the HTTP headers for a file.
func (f *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, options *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) {
	opts, blobHTTPHeaders := path.FormatSetHTTPHeadersOptions(options, httpHeaders)
	resp, err := f.blobClient().SetHTTPHeaders(ctx, blobHTTPHeaders, opts)
	// translate the blob response into the datalake response shape
	newResp := SetHTTPHeadersResponse{}
	path.FormatSetHTTPHeadersResponse(&newResp, &resp)
	err = exported.ConvertToDFSError(err)
	return newResp, err
}

// GetSASURL is a convenience method for generating a SAS token for the currently pointed at file.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (f *Client) GetSASURL(permissions sas.FilePermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if f.sharedKey() == nil { + return "", datalakeerror.MissingSharedKeyCredential + } + + urlParts, err := sas.ParseURL(f.BlobURL()) + err = exported.ConvertToDFSError(err) + if err != nil { + return "", err + } + + st := path.FormatGetSASURLOptions(o) + + qps, err := sas.DatalakeSignatureValues{ + FilePath: urlParts.PathName, + FileSystemName: urlParts.FileSystemName, + Version: sas.Version, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(f.sharedKey()) + + err = exported.ConvertToDFSError(err) + if err != nil { + return "", err + } + + endpoint := f.BlobURL() + "?" + qps.Encode() + + return endpoint, nil +} + +// AppendData appends data to existing file with a given offset. +func (f *Client) AppendData(ctx context.Context, offset int64, body io.ReadSeekCloser, options *AppendDataOptions) (AppendDataResponse, error) { + appendDataOptions, leaseAccessConditions, cpkInfo, err := options.format(offset, body) + if err != nil { + return AppendDataResponse{}, err + } + resp, err := f.generatedFileClientWithDFS().AppendData(ctx, body, appendDataOptions, nil, leaseAccessConditions, cpkInfo) + // TODO: check and uncomment this + //if err != nil { + // _, err1 := body.Seek(0, io.SeekStart) + // if err1 != nil { + // return AppendDataResponse{}, err1 + // } + //} + return resp, exported.ConvertToDFSError(err) +} + +// FlushData commits appended data to file +func (f *Client) FlushData(ctx context.Context, offset int64, options *FlushDataOptions) (FlushDataResponse, error) { + flushDataOpts, modifiedAccessConditions, leaseAccessConditions, httpHeaderOpts, cpkInfoOpts, err := options.format(offset) + if err != nil { + return FlushDataResponse{}, err + } + + resp, err := f.generatedFileClientWithDFS().FlushData(ctx, flushDataOpts, httpHeaderOpts, leaseAccessConditions, modifiedAccessConditions, cpkInfoOpts) 
+ return resp, exported.ConvertToDFSError(err) +} + +// Concurrent Upload Functions ----------------------------------------------------------------------------------------- + +// uploadFromReader uploads a buffer in chunks to an Azure file. +func (f *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) error { + if actualSize > MaxFileSize { + return errors.New("buffer is too large to upload to a file") + } + if o.ChunkSize == 0 { + o.ChunkSize = MaxAppendBytes + } + + if log.Should(exported.EventUpload) { + urlParts, err := azdatalake.ParseURL(f.DFSURL()) + if err == nil { + log.Writef(exported.EventUpload, "file name %s actual size %v chunk-size %v chunk-count %v", + urlParts.PathName, actualSize, o.ChunkSize, ((actualSize-1)/o.ChunkSize)+1) + } + } + + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "uploadFromReader", + TransferSize: actualSize, + ChunkSize: o.ChunkSize, + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, offset int64, chunkSize int64) error { + // This function is called once per file range. + // It is passed this file's offset within the buffer and its count of bytes + // Prepare to read the proper range/section of the buffer + if chunkSize < o.ChunkSize { + // this is the last file range. Its actual size might be less + // than the calculated size due to rounding up of the payload + // size to fit in a whole number of chunks. 
+ chunkSize = actualSize - offset + } + var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize) + if o.Progress != nil { + chunkProgress := int64(0) + body = streaming.NewRequestProgress(streaming.NopCloser(body), + func(bytesTransferred int64) { + diff := bytesTransferred - chunkProgress + chunkProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + uploadRangeOptions := o.getAppendDataOptions() + _, err := f.AppendData(ctx, offset, streaming.NopCloser(body), uploadRangeOptions) + return exported.ConvertToDFSError(err) + }, + }) + + if err != nil { + return exported.ConvertToDFSError(err) + } + // All appends were successful, call to flush + flushOpts := o.getFlushDataOptions() + _, err = f.FlushData(ctx, actualSize, flushOpts) + return exported.ConvertToDFSError(err) +} + +// UploadBuffer uploads a buffer in chunks to a file. +func (f *Client) UploadBuffer(ctx context.Context, buffer []byte, options *UploadBufferOptions) error { + uploadOptions := uploadFromReaderOptions{} + if options != nil { + uploadOptions = *options + } + return exported.ConvertToDFSError(f.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions)) +} + +// UploadFile uploads a file in chunks to a file. +func (f *Client) UploadFile(ctx context.Context, file *os.File, options *UploadFileOptions) error { + stat, err := file.Stat() + if err != nil { + return err + } + uploadOptions := uploadFromReaderOptions{} + if options != nil { + uploadOptions = *options + } + return exported.ConvertToDFSError(f.uploadFromReader(ctx, file, stat.Size(), &uploadOptions)) +} + +// UploadStream copies the file held in io.Reader to the file at fileClient. +// A Context deadline or cancellation will cause this to error. 
+func (f *Client) UploadStream(ctx context.Context, body io.Reader, options *UploadStreamOptions) error { + if options == nil { + options = &UploadStreamOptions{} + } + + err := copyFromReader(ctx, body, f, *options, newMMBPool) + return exported.ConvertToDFSError(err) +} + +// DownloadStream reads a range of bytes from a file. The response also includes the file's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (f *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { + if o == nil { + o = &DownloadStreamOptions{} + } + opts := o.format() + resp, err := f.blobClient().DownloadStream(ctx, opts) + newResp := FormatDownloadStreamResponse(&resp) + fullResp := DownloadStreamResponse{ + client: f, + DownloadResponse: newResp, + getInfo: httpGetterInfo{Range: o.Range, ETag: newResp.ETag}, + cpkInfo: o.CPKInfo, + cpkScope: o.CPKScopeInfo, + } + + return fullResp, exported.ConvertToDFSError(err) +} + +// DownloadBuffer downloads an Azure file to a buffer with parallel. +func (f *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) { + opts := o.format() + val, err := f.blobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), opts) + return val, exported.ConvertToDFSError(err) +} + +// DownloadFile downloads a datalake file to a local file. +// The file would be truncated if the size doesn't match. 
+func (f *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) { + opts := o.format() + val, err := f.blobClient().DownloadFile(ctx, file, opts) + return val, exported.ConvertToDFSError(err) +} + +// TODO: Undelete() diff --git a/sdk/storage/azdatalake/file/client_test.go b/sdk/storage/azdatalake/file/client_test.go new file mode 100644 index 000000000000..23a9f03d4c35 --- /dev/null +++ b/sdk/storage/azdatalake/file/client_test.go @@ -0,0 +1,3888 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file_test + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/binary" + "hash/crc64" + "io" + "math/rand" + "net/http" + "os" + "strconv" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +var proposedLeaseIDs = []*string{to.Ptr("c820a799-76d7-4ee2-6e15-546f19325c2c"), to.Ptr("326cc5e1-746e-4af8-4811-a50e6629a8ca")} + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running datalake Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &RecordedTestSuite{}) + suite.Run(t, &UnrecordedTestSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &RecordedTestSuite{}) + } else if recordMode == 
recording.RecordingMode { + suite.Run(t, &RecordedTestSuite{}) + } +} + +func (s *RecordedTestSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *RecordedTestSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *UnrecordedTestSuite) BeforeTest(suite string, test string) { + +} + +func (s *UnrecordedTestSuite) AfterTest(suite string, test string) { + +} + +type RecordedTestSuite struct { + suite.Suite +} + +type UnrecordedTestSuite struct { + suite.Suite +} + +//func validateFileDeleted(_require *require.Assertions, fileClient *file.Client) { +// _, err := fileClient.GetAccessControl(context.Background(), nil) +// _require.NotNil(err) +// +// testcommon.ValidateErrorCode(_require, err, datalakeerror.PathNotFound) +//} + +func (s *UnrecordedTestSuite) TestCreateFileAndDeleteWithConnectionString() { + + _require := require.New(s.T()) + testName := s.T().Name() + + connectionString, _ := testcommon.GetGenericConnectionString(testcommon.TestAccountDatalake) + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateDirName(testName) + fileClient, err := file.NewClientFromConnectionString(*connectionString, fileName, filesystemName, nil) + + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fileClient) + + resp, err := fileClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateFileAndDelete() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := 
testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateFileWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + createFileOpts := &file.CreateOptions{ + AccessConditions: nil, + } + + resp, err = fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateFileIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + 
_require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + createFileOpts := &file.CreateOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + resp, err = fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateFileIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + createFileOpts := &file.CreateOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + 
IfModifiedSince: ¤tTime, + }, + }, + } + + resp, err = fClient.Create(context.Background(), createFileOpts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestCreateFileIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + createFileOpts := &file.CreateOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + resp, err = fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateFileIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := 
testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + createFileOpts := &file.CreateOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + resp, err = fClient.Create(context.Background(), createFileOpts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestCreateFileIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + createFileOpts := &file.CreateOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + + resp, err = fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestCreateFileIfETagMatchFalse() { + _require := require.New(s.T()) + testName 
:= s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + createFileOpts := &file.CreateOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }, + } + + resp, err = fClient.Create(context.Background(), createFileOpts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestCreateFileWithNilHTTPHeaders() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &file.CreateOptions{ + HTTPHeaders: nil, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), createFileOpts) + 
_require.Nil(err) + _require.NotNil(resp) +} + +func (s *UnrecordedTestSuite) TestCreateFileWithExpiryAbsolute() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + expiryTimeAbsolute := time.Now().Add(8 * time.Second) + createFileOpts := &file.CreateOptions{ + Expiry: file.CreateExpiryValues{ + ExpiryType: file.CreateExpiryTypeAbsolute, + ExpiresOn: expiryTimeAbsolute.UTC().Format(http.TimeFormat), + }, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + resp, err := fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := fClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp1.ExpiresOn) + _require.Equal(expiryTimeAbsolute.UTC().Format(http.TimeFormat), (*resp1.ExpiresOn).UTC().Format(http.TimeFormat)) +} + +func (s *RecordedTestSuite) TestCreateFileWithExpiryNever() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &file.CreateOptions{ + Expiry: file.CreateExpiryValues{ + ExpiryType: file.CreateExpiryTypeNeverExpire, + }, + } + + _, err = 
fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + _, err = fClient.Delete(context.Background(), nil) + _require.Nil(err) + +} + +func (s *RecordedTestSuite) TestCreateFileWithExpiryRelativeToNow() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &file.CreateOptions{ + Expiry: file.CreateExpiryValues{ + ExpiryType: file.CreateExpiryTypeRelativeToNow, + ExpiresOn: strconv.FormatInt((8 * time.Second).Milliseconds(), 10), + }, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := fClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp1.ExpiresOn) + + time.Sleep(time.Second * 10) + _, err = fClient.GetProperties(context.Background(), nil) + testcommon.ValidateErrorCode(_require, err, datalakeerror.PathNotFound) +} + +func (s *RecordedTestSuite) TestCreateFileWithNeverExpire() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, 
s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &file.CreateOptions{ + Expiry: file.CreateExpiryValues{ + ExpiryType: file.CreateExpiryTypeNeverExpire, + }, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + resp1, err := fClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.ExpiresOn) +} + +func (s *RecordedTestSuite) TestCreateFileWithLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &file.CreateOptions{ + ProposedLeaseID: proposedLeaseIDs[0], + LeaseDuration: to.Ptr(int64(15)), + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + // should fail since leased + _, err = fClient.Create(context.Background(), createFileOpts) + _require.NotNil(err) + + time.Sleep(time.Second * 15) + resp, err = fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) 
TestCreateFileWithPermissions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + perms := "0777" + umask := "0000" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &file.CreateOptions{ + Permissions: &perms, + Umask: &umask, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + resp2, err := fClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp2) + _require.Equal("rwxrwxrwx", *resp2.Permissions) +} + +func (s *RecordedTestSuite) TestCreateFileWithOwnerGroupACLUmask() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + umask := "0000" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createFileOpts := &file.CreateOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + Umask: &umask, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := 
fClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + +} + +func (s *RecordedTestSuite) TestDeleteFileWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fClient.Create(context.Background(), nil) + _require.Nil(err) + + deleteOpts := &file.DeleteOptions{ + AccessConditions: nil, + } + + resp, err := fClient.Delete(context.Background(), deleteOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestDeleteFileIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + deleteOpts := &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: 
&file.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + resp1, err := fClient.Delete(context.Background(), deleteOpts) + _require.Nil(err) + _require.NotNil(resp1) +} + +func (s *RecordedTestSuite) TestDeleteFileIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + deleteOpts := &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + _, err = fClient.Delete(context.Background(), deleteOpts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDeleteFileIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, 
s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + deleteOpts := &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + _, err = fClient.Delete(context.Background(), deleteOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestDeleteFileIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + deleteOpts := &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + _, err = fClient.Delete(context.Background(), deleteOpts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestDeleteFileIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, 
s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + deleteOpts := &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + + _, err = fClient.Delete(context.Background(), deleteOpts) + _require.Nil(err) + _require.NotNil(resp) +} + +func (s *RecordedTestSuite) TestDeleteFileIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + deleteOpts := &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }, + } + + _, err = fClient.Delete(context.Background(), deleteOpts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) 
TestFileSetAccessControlNil() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = fClient.SetAccessControl(context.Background(), nil) + _require.NotNil(err) + + _require.Equal(err, datalakeerror.MissingParameters) +} + +// TODO: write test that fails if you provide permissions and acls +func (s *RecordedTestSuite) TestFileSetAccessControl() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + opts := &file.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = fClient.SetAccessControl(context.Background(), opts) + 
_require.Nil(err) +} + +func (s *RecordedTestSuite) TestFileSetAccessControlWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + opts := &file.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: nil, + } + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = fClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFileSetAccessControlIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + 
_require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + opts := &file.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + _, err = fClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFileSetAccessControlIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + opts := &file.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + + _, err = fClient.SetAccessControl(context.Background(), opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFileSetAccessControlIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := 
testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + opts := &file.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }} + + _, err = fClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFileSetAccessControlIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + 
_require.Nil(err) + _require.NotNil(resp) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + opts := &file.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + + _, err = fClient.SetAccessControl(context.Background(), opts) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFileSetAccessControlIfETagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + etag := resp.ETag + + opts := &file.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + + _, err = fClient.SetAccessControl(context.Background(), opts) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFileSetAccessControlIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := 
testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + opts := &file.SetAccessControlOptions{ + Owner: &owner, + Group: &group, + ACL: &acl, + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }} + + _, err = fClient.SetAccessControl(context.Background(), opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFileGetAccessControl() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &file.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + getACLResp, err := 
fClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *UnrecordedTestSuite) TestFileGetAccessControlWithSAS() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &file.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + // Adding SAS and options + permissions := sas.FilePermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Delete: true, + } + expiry := time.Now().Add(time.Hour) + + sasURL, err := fClient.GetSASURL(permissions, expiry, nil) + _require.Nil(err) + + fClient2, _ := file.NewClientWithNoCredential(sasURL, nil) + + getACLResp, err := fClient2.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *UnrecordedTestSuite) TestFileDeleteWithSAS() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + 
fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + // Adding SAS and options + permissions := sas.FilePermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Delete: true, + } + expiry := time.Now().Add(time.Hour) + + sasURL, err := fClient.GetSASURL(permissions, expiry, nil) + _require.Nil(err) + + fClient2, _ := file.NewClientWithNoCredential(sasURL, nil) + + _, err = fClient2.Delete(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFileGetAccessControlWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &file.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + opts := &file.GetAccessControlOptions{ + AccessConditions: nil, + } + + getACLResp, err := fClient.GetAccessControl(context.Background(), opts) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestFileGetAccessControlIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := 
testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	acl := "user::rwx,group::r-x,other::rwx"
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	createOpts := &file.CreateOptions{
+		ACL: &acl,
+	}
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), createOpts)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+	opts := &file.GetAccessControlOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfModifiedSince: &currentTime,
+			},
+		},
+	}
+
+	getACLResp, err := fClient.GetAccessControl(context.Background(), opts)
+	_require.Nil(err)
+	_require.Equal(acl, *getACLResp.ACL)
+}
+
+func (s *RecordedTestSuite) TestFileGetAccessControlIfModifiedSinceFalse() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	acl := "user::rwx,group::r-x,other::rwx"
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	createOpts := &file.CreateOptions{
+		ACL: &acl,
+	}
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), createOpts)
+	_require.Nil(err)
+	_require.NotNil(resp)
+	currentTime :=
testcommon.GetRelativeTimeFromAnchor(resp.Date, 10)
+
+	opts := &file.GetAccessControlOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfModifiedSince: &currentTime,
+			},
+		},
+	}
+
+	_, err = fClient.GetAccessControl(context.Background(), opts)
+	_require.NotNil(err)
+	testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet)
+}
+
+func (s *RecordedTestSuite) TestFileGetAccessControlIfUnmodifiedSinceTrue() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	acl := "user::rwx,group::r-x,other::rwx"
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	createOpts := &file.CreateOptions{
+		ACL: &acl,
+	}
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), createOpts)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10)
+	opts := &file.GetAccessControlOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfUnmodifiedSince: &currentTime,
+			},
+		}}
+
+	getACLResp, err := fClient.GetAccessControl(context.Background(), opts)
+	_require.Nil(err)
+	_require.Equal(acl, *getACLResp.ACL)
+}
+
+func (s *RecordedTestSuite) TestFileGetAccessControlIfUnmodifiedSinceFalse() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(),
testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	acl := "user::rwx,group::r-x,other::rwx"
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	createOpts := &file.CreateOptions{
+		ACL: &acl,
+	}
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), createOpts)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+
+	opts := &file.GetAccessControlOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfUnmodifiedSince: &currentTime,
+			},
+		},
+	}
+
+	_, err = fClient.GetAccessControl(context.Background(), opts)
+	_require.NotNil(err)
+
+	testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet)
+}
+
+func (s *RecordedTestSuite) TestFileGetAccessControlIfETagMatch() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	acl := "user::rwx,group::r-x,other::rwx"
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	createOpts := &file.CreateOptions{
+		ACL: &acl,
+	}
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), createOpts)
+	_require.Nil(err)
+	_require.NotNil(resp)
+	etag := resp.ETag
+
+	opts := &file.GetAccessControlOptions{
+
AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + + getACLResp, err := fClient.GetAccessControl(context.Background(), opts) + _require.Nil(err) + _require.Equal(acl, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestFileGetAccessControlIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + createOpts := &file.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + opts := &file.GetAccessControlOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }} + + _, err = fClient.GetAccessControl(context.Background(), opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFileUpdateAccessControl() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "user::rwx,group::r-x,other::rwx" + acl1 := "user::rwx,group::r--,other::r--" + defer testcommon.DeleteFileSystem(context.Background(), 
_require, fsClient) + + createOpts := &file.CreateOptions{ + ACL: &acl, + } + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), createOpts) + _require.Nil(err) + _require.NotNil(resp) + + _, err = fClient.UpdateAccessControl(context.Background(), acl1, nil) + _require.Nil(err) + + getACLResp, err := fClient.GetAccessControl(context.Background(), nil) + _require.Nil(err) + _require.Equal(acl1, *getACLResp.ACL) +} + +func (s *RecordedTestSuite) TestFileRemoveAccessControl() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + acl := "mask," + "default:user,default:group," + + "user:ec3595d6-2c17-4696-8caa-7e139758d24a,group:ec3595d6-2c17-4696-8caa-7e139758d24a," + + "default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,default:group:ec3595d6-2c17-4696-8caa-7e139758d24a" + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = fClient.RemoveAccessControl(context.Background(), acl, nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFileSetMetadataWithBasicMetadata() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + 
fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	defer testcommon.DeleteFile(context.Background(), _require, fClient)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	_, err = fClient.SetMetadata(context.Background(), testcommon.BasicMetadata, nil)
+	_require.Nil(err)
+}
+
+func (s *RecordedTestSuite) TestFileSetMetadataWithAccessConditions() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	defer testcommon.DeleteFile(context.Background(), _require, fClient)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+
+	opts := &file.SetMetadataOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfModifiedSince: &currentTime,
+			},
+		},
+	}
+	_, err = fClient.SetMetadata(context.Background(), testcommon.BasicMetadata, opts)
+	_require.Nil(err)
+}
+
+func
validatePropertiesSet(_require *require.Assertions, fileClient *file.Client, disposition string) { + resp, err := fileClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.Equal(*resp.ContentDisposition, disposition) +} + +func (s *RecordedTestSuite) TestFileSetHTTPHeaders() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, err = fClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, nil) + _require.Nil(err) + validatePropertiesSet(_require, fClient, *testcommon.BasicHeaders.ContentDisposition) +} + +func (s *RecordedTestSuite) TestFileSetHTTPHeadersWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) 
+
+	opts := &file.SetHTTPHeadersOptions{
+		AccessConditions: nil,
+	}
+
+	_, err = fClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts)
+	_require.Nil(err)
+	validatePropertiesSet(_require, fClient, *testcommon.BasicHeaders.ContentDisposition)
+}
+
+func (s *RecordedTestSuite) TestFileSetHTTPHeadersIfModifiedSinceTrue() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+
+	opts := &file.SetHTTPHeadersOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfModifiedSince: &currentTime,
+			},
+		},
+	}
+	_, err = fClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts)
+	_require.Nil(err)
+	validatePropertiesSet(_require, fClient, *testcommon.BasicHeaders.ContentDisposition)
+}
+
+func (s *RecordedTestSuite) TestFileSetHTTPHeadersIfModifiedSinceFalse() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+
_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10)
+
+	opts := &file.SetHTTPHeadersOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfModifiedSince: &currentTime,
+			},
+		},
+	}
+	_, err = fClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts)
+	_require.NotNil(err)
+	testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet)
+}
+
+func (s *RecordedTestSuite) TestFileSetHTTPHeadersIfUnmodifiedSinceTrue() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10)
+
+	opts := &file.SetHTTPHeadersOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfUnmodifiedSince: &currentTime,
+			},
+		},
+	}
+	_, err = fClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts)
+	_require.Nil(err)
+	validatePropertiesSet(_require, fClient, *testcommon.BasicHeaders.ContentDisposition)
+}
+
+func (s *RecordedTestSuite) TestFileSetHTTPHeadersIfUnmodifiedSinceFalse() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+
+	opts := &file.SetHTTPHeadersOptions{
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfUnmodifiedSince: &currentTime,
+			},
+		},
+	}
+	_, err = fClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts)
+	_require.NotNil(err)
+	testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet)
+}
+
+func (s *RecordedTestSuite) TestFileSetHTTPHeadersIfETagMatch() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
_require.NotNil(resp) + + etag := resp.ETag + + opts := &file.SetHTTPHeadersOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfMatch: etag, + }, + }} + _, err = fClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.Nil(err) + validatePropertiesSet(_require, fClient, *testcommon.BasicHeaders.ContentDisposition) +} + +func (s *RecordedTestSuite) TestFileSetHTTPHeadersIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + opts := &file.SetHTTPHeadersOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }, + } + _, err = fClient.SetHTTPHeaders(context.Background(), testcommon.BasicHeaders, opts) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestRenameNoOptions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = 
fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + //resp1, err := fClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = fClient.Rename(context.Background(), "newName", nil) + _require.Nil(err) + //_require.NotNil(resp1) + //_require.Contains(resp1.NewFileClient.DFSURL(), "newName") +} + +func (s *RecordedTestSuite) TestRenameFileWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + renameFileOpts := &file.RenameOptions{ + AccessConditions: nil, + } + + //resp1, err := fClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = fClient.Rename(context.Background(), "newName", renameFileOpts) + _require.Nil(err) + //_require.NotNil(resp1) + //_require.Contains(resp1.NewFileClient.DFSURL(), "newName") +} + +func (s *RecordedTestSuite) TestRenameFileIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := 
testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+
+	renameFileOpts := &file.RenameOptions{
+		SourceAccessConditions: &file.SourceAccessConditions{
+			SourceModifiedAccessConditions: &file.SourceModifiedAccessConditions{
+				SourceIfModifiedSince: &currentTime,
+			},
+		},
+	}
+	//resp1, err := fClient.Rename(context.Background(), "newName", renameFileOpts)
+	_, err = fClient.Rename(context.Background(), "newName", renameFileOpts)
+	_require.Nil(err)
+	//_require.NotNil(resp1)
+	//_require.Contains(resp1.NewFileClient.DFSURL(), "newName")
+}
+
+func (s *RecordedTestSuite) TestRenameFileIfModifiedSinceFalse() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10)
+
+	renameFileOpts :=
&file.RenameOptions{
+		SourceAccessConditions: &file.SourceAccessConditions{
+			SourceModifiedAccessConditions: &file.SourceModifiedAccessConditions{
+				SourceIfModifiedSince: &currentTime,
+			},
+		},
+	}
+
+	//_, err = fClient.Rename(context.Background(), "newName", renameFileOpts)
+	_, err = fClient.Rename(context.Background(), "newName", renameFileOpts)
+
+	_require.NotNil(err)
+	testcommon.ValidateErrorCode(_require, err, datalakeerror.SourceConditionNotMet)
+}
+
+func (s *RecordedTestSuite) TestRenameFileIfUnmodifiedSinceTrue() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10)
+
+	renameFileOpts := &file.RenameOptions{
+		SourceAccessConditions: &file.SourceAccessConditions{
+			SourceModifiedAccessConditions: &file.SourceModifiedAccessConditions{
+				SourceIfUnmodifiedSince: &currentTime,
+			},
+		},
+	}
+
+	//resp1, err := fClient.Rename(context.Background(), "newName", renameFileOpts)
+	_, err = fClient.Rename(context.Background(), "newName", renameFileOpts)
+	_require.Nil(err)
+	//_require.NotNil(resp1)
+	//_require.Contains(resp1.NewFileClient.DFSURL(), "newName")
+}
+
+func (s *RecordedTestSuite) TestRenameFileIfUnmodifiedSinceFalse() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName :=
testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+
+	renameFileOpts := &file.RenameOptions{
+		SourceAccessConditions: &file.SourceAccessConditions{
+			SourceModifiedAccessConditions: &file.SourceModifiedAccessConditions{
+				SourceIfUnmodifiedSince: &currentTime,
+			},
+		},
+	}
+
+	//_, err = fClient.Rename(context.Background(), "newName", renameFileOpts)
+	_, err = fClient.Rename(context.Background(), "newName", renameFileOpts)
+
+	_require.NotNil(err)
+	testcommon.ValidateErrorCode(_require, err, datalakeerror.SourceConditionNotMet)
+}
+
+func (s *RecordedTestSuite) TestRenameFileIfETagMatch() {
+	_require := require.New(s.T())
+	testName := s.T().Name()
+
+	filesystemName := testcommon.GenerateFileSystemName(testName)
+	fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+	defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient)
+
+	_, err = fsClient.Create(context.Background(), nil)
+	_require.Nil(err)
+
+	fileName := testcommon.GenerateFileName(testName)
+	fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil)
+	_require.NoError(err)
+
+	resp, err := fClient.Create(context.Background(), nil)
+	_require.Nil(err)
+	_require.NotNil(resp)
+
+	etag := resp.ETag
+
+	renameFileOpts :=
&file.RenameOptions{ + SourceAccessConditions: &file.SourceAccessConditions{ + SourceModifiedAccessConditions: &file.SourceModifiedAccessConditions{ + SourceIfMatch: etag, + }, + }, + } + + //resp1, err := fClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = fClient.Rename(context.Background(), "newName", renameFileOpts) + _require.Nil(err) + //_require.NotNil(resp1) + //_require.Contains(resp1.NewFileClient.DFSURL(), "newName") +} + +func (s *RecordedTestSuite) TestRenameFileIfETagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + etag := resp.ETag + + renameFileOpts := &file.RenameOptions{ + SourceAccessConditions: &file.SourceAccessConditions{ + SourceModifiedAccessConditions: &file.SourceModifiedAccessConditions{ + SourceIfNoneMatch: etag, + }, + }, + } + + //_, err = fClient.Rename(context.Background(), "newName", renameFileOpts) + _, err = fClient.Rename(context.Background(), "newName", renameFileOpts) + + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.SourceConditionNotMet) +} + +func (s *UnrecordedTestSuite) TestFileUploadDownloadStream() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), 
testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 100 * 1024 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadStream(context.Background(), streaming.NopCloser(bytes.NewReader(content)), &file.UploadStreamOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + +} + +func (s *RecordedTestSuite) TestFileUploadDownloadSmallStream() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, 
s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadStream(context.Background(), streaming.NopCloser(bytes.NewReader(content)), &file.UploadStreamOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) +} + +func (s *RecordedTestSuite) TestFileUploadTinyStream() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 4 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadStream(context.Background(), streaming.NopCloser(bytes.NewReader(content)), &file.UploadStreamOptions{ + Concurrency: 5, + ChunkSize: 
2 * 1024, + }) + _require.NoError(err) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) +} + +func (s *UnrecordedTestSuite) TestFileUploadFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 100 * 1024 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + // create local file + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + err = os.WriteFile("testFile", content, 0644) + _require.NoError(err) + + defer func() { + err = os.Remove("testFile") + _require.NoError(err) + }() + + fh, err := os.Open("testFile") + _require.NoError(err) + + defer func(fh *os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + hash := md5.New() + _, err = io.Copy(hash, fh) + _require.NoError(err) + contentMD5 := hash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + gResp2, err := 
fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) +} + +func (s *RecordedTestSuite) TestSmallFileUploadFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + // create local file + _, content := testcommon.GenerateData(int(fileSize)) + _require.NoError(err) + err = os.WriteFile("testFile", content, 0644) + _require.NoError(err) + + defer func() { + err = os.Remove("testFile") + _require.NoError(err) + }() + + fh, err := os.Open("testFile") + _require.NoError(err) + + defer func(fh *os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + hash := md5.New() + _, err = io.Copy(hash, fh) + _require.NoError(err) + contentMD5 := hash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + 
_require.Equal(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) +} + +func (s *RecordedTestSuite) TestTinyFileUploadFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + // create local file + _, content := testcommon.GenerateData(int(fileSize)) + _require.NoError(err) + err = os.WriteFile("testFile", content, 0644) + _require.NoError(err) + + defer func() { + err = os.Remove("testFile") + _require.NoError(err) + }() + + fh, err := os.Open("testFile") + _require.NoError(err) + + defer func(fh *os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + hash := md5.New() + _, err = io.Copy(hash, fh) + _require.NoError(err) + contentMD5 := hash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + ChunkSize: 2, + }) + _require.NoError(err) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + 
_require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) +} + +func (s *UnrecordedTestSuite) TestFileUploadBuffer() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 100 * 1024 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) +} + +func (s *RecordedTestSuite) TestFileUploadSmallBuffer() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + 
fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) +} + +func (s *RecordedTestSuite) TestFileAppendAndFlushData() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + 
_require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), nil) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +func (s *UnrecordedTestSuite) TestFileAppendAndFlushDataWithValidation() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + content := make([]byte, contentSize) + body := bytes.NewReader(content) + rsc := streaming.NopCloser(body) + contentCRC64 := crc64.Checksum(content, shared.CRC64Table) + + opts := &file.AppendDataOptions{ + TransactionalValidation: file.TransferValidationTypeComputeCRC64(), + } + putResp, err := srcFClient.AppendData(context.Background(), 0, rsc, opts) + _require.Nil(err) + // _require.Equal(putResp.RawResponse.StatusCode, 201) + _require.NotNil(putResp.ContentCRC64) + _require.EqualValues(binary.LittleEndian.Uint64(putResp.ContentCRC64), contentCRC64) + + _, err = 
srcFClient.FlushData(context.Background(), int64(contentSize), nil) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +func (s *RecordedTestSuite) TestFileAppendAndFlushDataWithEmptyOpts() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + opts := &file.AppendDataOptions{} + opts1 := &file.FlushDataOptions{} + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, opts) + _require.NoError(err) + + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts1) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +func (s *RecordedTestSuite) TestFileAppendAndFlushDataWithLeasedFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = 
fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + createFileOpts := &file.CreateOptions{ + ProposedLeaseID: proposedLeaseIDs[0], + LeaseDuration: to.Ptr(int64(15)), + } + resp, err := srcFClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + opts := &file.AppendDataOptions{LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: proposedLeaseIDs[0], + }} + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NotNil(err) + + _, err = rsc.Seek(0, io.SeekStart) + _require.NoError(err) + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, opts) + _require.Nil(err) + + opts1 := &file.FlushDataOptions{AccessConditions: &file.AccessConditions{LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: proposedLeaseIDs[0], + }}} + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts1) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), &file.GetPropertiesOptions{ + AccessConditions: &file.AccessConditions{LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: proposedLeaseIDs[0], + }}}) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +func (s *RecordedTestSuite) TestFileAppendAndFlushAndDownloadDataWithLeasedFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = 
fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + createFileOpts := &file.CreateOptions{ + ProposedLeaseID: proposedLeaseIDs[0], + LeaseDuration: to.Ptr(int64(15)), + } + resp, err := srcFClient.Create(context.Background(), createFileOpts) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + opts := &file.AppendDataOptions{LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: proposedLeaseIDs[0], + }} + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, opts) + _require.Nil(err) + + _, err = rsc.Seek(0, io.SeekStart) + _require.NoError(err) + + _, err = srcFClient.AppendData(context.Background(), int64(contentSize), rsc, opts) + _require.Nil(err) + + opts1 := &file.FlushDataOptions{AccessConditions: &file.AccessConditions{LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: proposedLeaseIDs[0], + }}} + _, err = srcFClient.FlushData(context.Background(), int64(contentSize)*2, opts1) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), &file.GetPropertiesOptions{ + AccessConditions: &file.AccessConditions{LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: proposedLeaseIDs[0], + }}}) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)*2) + + destBuffer := make([]byte, contentSize*2) + cnt, err := srcFClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 8 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, int64(contentSize*2)) +} + +func (s *RecordedTestSuite) TestAppendAndFlushFileWithHTTPHeaders() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := 
testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + opts := &file.FlushDataOptions{HTTPHeaders: &testcommon.BasicHeaders} + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) + validatePropertiesSet(_require, srcFClient, *testcommon.BasicHeaders.ContentDisposition) +} + +func (s *RecordedTestSuite) TestFlushWithNilAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := 
srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + + opts := &file.FlushDataOptions{ + AccessConditions: nil, + } + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +func (s *RecordedTestSuite) TestFlushWithEmptyAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + + opts := &file.FlushDataOptions{ + AccessConditions: &file.AccessConditions{}, + } + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +// ========================================== + +func (s *RecordedTestSuite) 
TestFlushIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + opts := &file.FlushDataOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfModifiedSince: &currentTime, + }, + }, + } + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +func (s *RecordedTestSuite) TestFlushIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + 
+ srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + opts := &file.FlushDataOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfModifiedSince: &currentTime, + }, + }, + } + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NotNil(err) +} + +func (s *RecordedTestSuite) TestFlushIfUnmodifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + opts := &file.FlushDataOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfUnmodifiedSince: &currentTime, 
+ }, + }, + } + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +func (s *RecordedTestSuite) TestFlushIfUnmodifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + _, err = srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + opts := &file.FlushDataOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfUnmodifiedSince: &currentTime, + }, + }, + } + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NotNil(err) +} + +func (s *RecordedTestSuite) TestFlushIfEtagMatch() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, 
fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + resp1, err := srcFClient.AppendData(context.Background(), 0, rsc, nil) + _require.NoError(err) + etag := resp1.ETag + + opts := &file.FlushDataOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfMatch: etag, + }, + }, + } + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +func (s *RecordedTestSuite) TestFlushIfEtagMatchFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + srcFileName := "src" + testcommon.GenerateFileName(testName) + + srcFClient, err := testcommon.GetFileClient(filesystemName, srcFileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := srcFClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + contentSize := 1024 * 8 // 8KB + rsc, _ := testcommon.GenerateData(contentSize) + + resp1, err := srcFClient.AppendData(context.Background(), 0, rsc, nil) + 
_require.NoError(err) + etag := resp1.ETag + + opts := &file.FlushDataOptions{ + AccessConditions: &file.AccessConditions{ + ModifiedAccessConditions: &file.ModifiedAccessConditions{ + IfNoneMatch: etag, + }, + }, + } + _, err = srcFClient.FlushData(context.Background(), int64(contentSize), opts) + _require.NoError(err) + + gResp2, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, int64(contentSize)) +} + +// TODO: test retain uncommitted data + +func (s *UnrecordedTestSuite) TestFileDownloadFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 100 * 1024 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + destFileName := "BigFile-downloaded.bin" + destFile, err := os.Create(destFileName) + _require.NoError(err) + defer func(name string) { + err = os.Remove(name) + _require.NoError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + _require.NoError(err) + }(destFile) + + cnt, err := 
fClient.DownloadFile(context.Background(), destFile, &file.DownloadFileOptions{ + ChunkSize: 10 * 1024 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + hash := md5.New() + _, err = io.Copy(hash, destFile) + _require.NoError(err) + downloadedContentMD5 := hash.Sum(nil) + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) +} + +func (s *RecordedTestSuite) TestFileUploadDownloadSmallFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + // create local file + _, content := testcommon.GenerateData(int(fileSize)) + srcFileName := "testFileUpload" + err = os.WriteFile(srcFileName, content, 0644) + _require.NoError(err) + defer func() { + err = os.Remove(srcFileName) + _require.NoError(err) + }() + fh, err := os.Open(srcFileName) + _require.NoError(err) + defer func(fh *os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + srcHash := md5.New() + _, err = io.Copy(srcHash, fh) + _require.NoError(err) + contentMD5 := srcHash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + 
destFileName := "SmallFile-downloaded.bin" + destFile, err := os.Create(destFileName) + _require.NoError(err) + defer func(name string) { + err = os.Remove(name) + _require.NoError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + _require.NoError(err) + }(destFile) + + cnt, err := fClient.DownloadFile(context.Background(), destFile, &file.DownloadFileOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + destHash := md5.New() + _, err = io.Copy(destHash, destFile) + _require.NoError(err) + downloadedContentMD5 := destHash.Sum(nil) + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) +} + +func (s *RecordedTestSuite) TestFileUploadDownloadSmallFileWithRange() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + // create local file + _, content := testcommon.GenerateData(int(fileSize)) + srcFileName := "testFileUpload" + err = os.WriteFile(srcFileName, content, 0644) + _require.NoError(err) + defer func() { + err = os.Remove(srcFileName) + _require.NoError(err) + }() + fh, err := os.Open(srcFileName) + _require.NoError(err) + defer func(fh 
*os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + srcHash := md5.New() + _, err = io.Copy(srcHash, fh) + _require.NoError(err) + contentMD5 := srcHash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + destFileName := "SmallFile-downloaded.bin" + destFile, err := os.Create(destFileName) + _require.NoError(err) + defer func(name string) { + err = os.Remove(name) + _require.NoError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + _require.NoError(err) + }(destFile) + + cnt, err := fClient.DownloadFile(context.Background(), destFile, &file.DownloadFileOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + Range: &file.HTTPRange{ + Offset: 0, + Count: 10 * 1024, + }, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + destHash := md5.New() + _, err = io.Copy(destHash, destFile) + _require.NoError(err) + downloadedContentMD5 := destHash.Sum(nil) + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) +} + +func (s *RecordedTestSuite) TestFileUploadDownloadSmallFileWithAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := 
fClient.Create(context.Background(), nil)
+ _require.Nil(err)
+ _require.NotNil(resp)
+ currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+
+ // create local file
+ _, content := testcommon.GenerateData(int(fileSize))
+ srcFileName := "testFileUpload"
+ err = os.WriteFile(srcFileName, content, 0644)
+ _require.NoError(err)
+ defer func() {
+ err = os.Remove(srcFileName)
+ _require.NoError(err)
+ }()
+ fh, err := os.Open(srcFileName)
+ _require.NoError(err)
+ defer func(fh *os.File) {
+ err := fh.Close()
+ _require.NoError(err)
+ }(fh)
+
+ srcHash := md5.New()
+ _, err = io.Copy(srcHash, fh)
+ _require.NoError(err)
+ contentMD5 := srcHash.Sum(nil)
+
+ err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{
+ Concurrency: 5,
+ ChunkSize: 2 * 1024,
+ })
+ _require.NoError(err)
+
+ destFileName := "SmallFile-downloaded.bin"
+ destFile, err := os.Create(destFileName)
+ _require.NoError(err)
+ defer func(name string) {
+ err = os.Remove(name)
+ _require.NoError(err)
+ }(destFileName)
+ defer func(destFile *os.File) {
+ err = destFile.Close()
+ _require.NoError(err)
+ }(destFile)
+
+ cnt, err := fClient.DownloadFile(context.Background(), destFile, &file.DownloadFileOptions{
+ ChunkSize: 2 * 1024,
+ Concurrency: 5,
+ Range: &file.HTTPRange{
+ Offset: 0,
+ Count: 10 * 1024,
+ },
+ AccessConditions: &file.AccessConditions{
+ ModifiedAccessConditions: &file.ModifiedAccessConditions{
+ IfModifiedSince: &currentTime,
+ },
+ },
+ })
+ _require.NoError(err)
+ _require.Equal(cnt, fileSize)
+
+ destHash := md5.New()
+ _, err = io.Copy(destHash, destFile)
+ _require.NoError(err)
+ downloadedContentMD5 := destHash.Sum(nil)
+
+ _require.EqualValues(downloadedContentMD5, contentMD5)
+
+ gResp2, err := fClient.GetProperties(context.Background(), nil)
+ _require.NoError(err)
+ _require.Equal(*gResp2.ContentLength, fileSize)
+}
+
+func (s *RecordedTestSuite) TestFileUploadDownloadWithProgress() {
+ _require := require.New(s.T())
+ testName := s.T().Name()
+
+ filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + bytesUploaded := int64(0) + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + Progress: func(bytesTransferred int64) { + _require.GreaterOrEqual(bytesTransferred, bytesUploaded) + bytesUploaded = bytesTransferred + }, + }) + _require.NoError(err) + _require.Equal(bytesUploaded, fileSize) + + destBuffer := make([]byte, fileSize) + bytesDownloaded := int64(0) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + Progress: func(bytesTransferred int64) { + _require.GreaterOrEqual(bytesTransferred, bytesDownloaded) + bytesDownloaded = bytesTransferred + }, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + _require.Equal(bytesDownloaded, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) +} + +func (s *UnrecordedTestSuite) TestFileDownloadBuffer() { + _require 
:= require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 100 * 1024 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + destBuffer := make([]byte, fileSize) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 10 * 1024 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) +} + +func (s *RecordedTestSuite) TestFileDownloadSmallBuffer() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, 
fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + destBuffer := make([]byte, fileSize) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 10 * 1024 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) +} + +func (s *RecordedTestSuite) TestFileDownloadSmallBufferWithHTTPRange() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := 
fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content[0 : fileSize/2]) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + destBuffer := make([]byte, fileSize/2) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 10 * 1024 * 1024, + Concurrency: 5, + Range: &file.HTTPRange{ + Offset: 0, + Count: 5 * 1024, + }, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize/2) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) +} + +func (s *RecordedTestSuite) TestFileDownloadSmallBufferWithAccessConditions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var fileSize int64 = 10 * 1024 + fileName := testcommon.GenerateFileName(testName) + fClient, err := testcommon.GetFileClient(filesystemName, fileName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fClient.Create(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content[0 : fileSize/2]) + contentMD5 := md5Value[:] + + err = 
fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{
+ Concurrency: 5,
+ ChunkSize: 4 * 1024 * 1024,
+ })
+ _require.NoError(err)
+
+ currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10)
+
+ destBuffer := make([]byte, fileSize/2)
+ cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{
+ ChunkSize: 10 * 1024 * 1024,
+ Concurrency: 5,
+ AccessConditions: &file.AccessConditions{
+ ModifiedAccessConditions: &file.ModifiedAccessConditions{
+ IfModifiedSince: &currentTime,
+ },
+ },
+ Range: &file.HTTPRange{
+ Offset: 0,
+ Count: 5 * 1024,
+ },
+ })
+ _require.NoError(err)
+ _require.Equal(cnt, fileSize/2)
+
+ downloadedMD5Value := md5.Sum(destBuffer)
+ downloadedContentMD5 := downloadedMD5Value[:]
+
+ _require.EqualValues(downloadedContentMD5, contentMD5)
+
+ gResp2, err := fClient.GetProperties(context.Background(), nil)
+ _require.NoError(err)
+ _require.Equal(*gResp2.ContentLength, fileSize)
+}
+
+// TODO tests all uploads/downloads with other opts
diff --git a/sdk/storage/azdatalake/file/constants.go b/sdk/storage/azdatalake/file/constants.go
new file mode 100644
index 000000000000..77729179d3d4
--- /dev/null
+++ b/sdk/storage/azdatalake/file/constants.go
@@ -0,0 +1,126 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package file
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path"
+)
+
+// EncryptionAlgorithmType defines values for EncryptionAlgorithmType.
+type EncryptionAlgorithmType = path.EncryptionAlgorithmType + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = path.EncryptionAlgorithmTypeNone + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = path.EncryptionAlgorithmTypeAES256 +) + +// response models: + +// ImmutabilityPolicyMode Specifies the immutability policy mode to set on the file. +type ImmutabilityPolicyMode = path.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = path.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = path.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = path.ImmutabilityPolicyModeLocked +) + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = path.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = path.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = path.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = path.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = path.CopyStatusTypeFailed +) + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType = exported.TransferValidationType + +// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed crc64. +type TransferValidationTypeCRC64 = exported.TransferValidationTypeCRC64 + +// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. +func TransferValidationTypeComputeCRC64() TransferValidationType { + return exported.TransferValidationTypeComputeCRC64() +} + +// SetExpiryType defines the values for modes of file expiration. +type SetExpiryType = generated_blob.ExpiryOptions + +const ( + // SetExpiryTypeAbsolute sets the expiration date as an absolute value expressed in RFC1123 format. 
+ SetExpiryTypeAbsolute SetExpiryType = generated_blob.ExpiryOptionsAbsolute
+
+ // SetExpiryTypeNeverExpire sets the file to never expire or removes the current expiration date.
+ SetExpiryTypeNeverExpire SetExpiryType = generated_blob.ExpiryOptionsNeverExpire
+
+ // SetExpiryTypeRelativeToCreation sets the expiration date relative to the time of file creation.
+ // The value is expressed as the number of milliseconds to elapse from the time of creation.
+ SetExpiryTypeRelativeToCreation SetExpiryType = generated_blob.ExpiryOptionsRelativeToCreation
+
+ // SetExpiryTypeRelativeToNow sets the expiration date relative to the current time.
+ // The value is expressed as the number of milliseconds to elapse from the present time.
+ SetExpiryTypeRelativeToNow SetExpiryType = generated_blob.ExpiryOptionsRelativeToNow
+)
+
+// CreateExpiryType defines the values for modes of file expiration specified during creation.
+type CreateExpiryType = generated.PathExpiryOptions
+
+const (
+ // CreateExpiryTypeAbsolute sets the expiration date as an absolute value expressed in RFC1123 format.
+ CreateExpiryTypeAbsolute CreateExpiryType = generated.PathExpiryOptionsAbsolute
+
+ // CreateExpiryTypeNeverExpire sets the file to never expire or removes the current expiration date.
+ CreateExpiryTypeNeverExpire CreateExpiryType = generated.PathExpiryOptionsNeverExpire
+
+ // CreateExpiryTypeRelativeToNow sets the expiration date relative to the current time.
+ // The value is expressed as the number of milliseconds to elapse from the present time.
+ CreateExpiryTypeRelativeToNow CreateExpiryType = generated.PathExpiryOptionsRelativeToNow
+)
+
+// StatusType defines values for StatusType
+type StatusType = azdatalake.StatusType
+
+const (
+ StatusTypeLocked StatusType = azdatalake.StatusTypeLocked
+ StatusTypeUnlocked StatusType = azdatalake.StatusTypeUnlocked
+)
+
+// PossibleStatusTypeValues returns the possible values for the StatusType const type.
+func PossibleStatusTypeValues() []StatusType { + return azdatalake.PossibleStatusTypeValues() +} + +// DurationType defines values for DurationType +type DurationType = azdatalake.DurationType + +const ( + DurationTypeInfinite DurationType = azdatalake.DurationTypeInfinite + DurationTypeFixed DurationType = azdatalake.DurationTypeFixed +) + +// PossibleDurationTypeValues returns the possible values for the DurationType const type. +func PossibleDurationTypeValues() []DurationType { + return azdatalake.PossibleDurationTypeValues() +} + +// StateType defines values for StateType +type StateType = azdatalake.StateType + +const ( + StateTypeAvailable StateType = azdatalake.StateTypeAvailable + StateTypeLeased StateType = azdatalake.StateTypeLeased + StateTypeExpired StateType = azdatalake.StateTypeExpired + StateTypeBreaking StateType = azdatalake.StateTypeBreaking + StateTypeBroken StateType = azdatalake.StateTypeBroken +) diff --git a/sdk/storage/azdatalake/file/examples_test.go b/sdk/storage/azdatalake/file/examples_test.go new file mode 100644 index 000000000000..213adfb3f173 --- /dev/null +++ b/sdk/storage/azdatalake/file/examples_test.go @@ -0,0 +1,389 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package file_test + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/binary" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "hash/crc64" + "io" + "log" + "os" + "strconv" + "strings" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +// make sure you create the filesystem before running this example +func Example_file_CreateAndDelete() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fileClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + _, err = fileClient.Create(context.Background(), nil) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) +} + +// This examples shows how to create a file with HTTP Headers, how to read, and how to update the file's HTTP headers. +// make sure you create the filesystem and file before running this example. 
+func Example_file_HTTPHeaders() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fileClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + // Create a directory with HTTP headers + _, err = fileClient.SetHTTPHeaders(context.TODO(), file.HTTPHeaders{ + ContentType: to.Ptr("text/html; charset=utf-8"), + ContentDisposition: to.Ptr("attachment"), + }, nil) + handleError(err) + + get, err := fileClient.GetProperties(context.TODO(), nil) + handleError(err) + + fmt.Println(get.ContentType) + fmt.Println(get.ContentDisposition) +} + +// make sure you create the filesystem before running this example +func Example_file_CreateFileWithExpiryRelativeToNow() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + createFileOpts := &file.CreateOptions{ + Expiry: file.CreateExpiryValues{ + ExpiryType: file.CreateExpiryTypeRelativeToNow, + ExpiresOn: strconv.FormatInt((8 * time.Second).Milliseconds(), 10), + }, + } + + _, err = fClient.Create(context.Background(), createFileOpts) + handleError(err) + + resp, err := fClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println(*resp.ExpiresOn) + + time.Sleep(time.Second * 10) + _, err = fClient.GetProperties(context.Background(), nil) + // we expect datalakeerror.PathNotFound + handleError(err) +} + +// make sure you create the filesystem before 
running this example +func Example_file_CreateFileWithNeverExpire() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + createFileOpts := &file.CreateOptions{ + Expiry: file.CreateExpiryValues{ + ExpiryType: file.CreateExpiryTypeNeverExpire, + }, + } + + _, err = fClient.Create(context.Background(), createFileOpts) + handleError(err) + + resp, err := fClient.GetProperties(context.Background(), nil) + handleError(err) + // should be empty since we never expire + fmt.Println(*resp.ExpiresOn) +} + +// make sure you create the filesystem and file before running this example +func Example_file_Client_SetMetadata() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + fileClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + _, err = fileClient.SetMetadata(context.TODO(), map[string]*string{"author": to.Ptr("Tamer")}, nil) + handleError(err) + + // Query the directory's properties and metadata + get, err := fileClient.GetProperties(context.TODO(), nil) + handleError(err) + + // Show the directory's metadata + if get.Metadata == nil { + log.Fatal("No metadata returned") + } + + for k, v := range get.Metadata { + fmt.Print(k + "=" + *v + "\n") + } +} + +// make sure you create the filesystem before running this example +func Example_file_Rename() { + accountName, accountKey := 
os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fileClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + _, err = fileClient.Create(context.Background(), nil) + handleError(err) + + _, err = fileClient.Rename(context.Background(), "renameFile", nil) + handleError(err) +} + +// set acl on a file +// make sure you create the filesystem and file before running this example +func Example_file_SetACL() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + acl := "user::rwx,group::r-x,other::rwx" + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fileClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + _, err = fileClient.SetAccessControl(context.Background(), &file.SetAccessControlOptions{ + ACL: &acl, + }) + handleError(err) +} + +func getRelativeTimeFromAnchor(anchorTime *time.Time, amount time.Duration) time.Time { + return anchorTime.Add(amount * time.Second) +} + +// make sure you create the filesystem before running this example +func Example_file_SetAccessControlIfUnmodifiedSinceTrue() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a directory client + owner := "4cf4e284-f6a8-4540-b53e-c3469af032dc" + group := owner + acl := "user::rwx,group::r-x,other::rwx" + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fileClient, err := 
file.NewClientWithSharedKeyCredential(u, credential, nil)
+	handleError(err)
+	resp, err := fileClient.Create(context.Background(), nil)
+	handleError(err)
+
+	currentTime := getRelativeTimeFromAnchor(resp.Date, 10)
+	opts := &file.SetAccessControlOptions{
+		Owner: &owner,
+		Group: &group,
+		ACL:   &acl,
+		AccessConditions: &file.AccessConditions{
+			ModifiedAccessConditions: &file.ModifiedAccessConditions{
+				IfUnmodifiedSince: &currentTime,
+			},
+		}}
+
+	_, err = fileClient.SetAccessControl(context.Background(), opts)
+	handleError(err)
+}
+
+const random64BString string = "2SDgZj6RkKYzJpu04sweQek4uWHO8ndPnYlZ0tnFS61hjnFZ5IkvIGGY44eKABov"
+
+func generateData(sizeInBytes int) (io.ReadSeekCloser, []byte) {
+	data := make([]byte, sizeInBytes)
+	_len := len(random64BString)
+	if sizeInBytes > _len {
+		count := sizeInBytes / _len
+		if sizeInBytes%_len != 0 {
+			count = count + 1
+		}
+		copy(data[:], strings.Repeat(random64BString, count))
+	} else {
+		copy(data[:], random64BString)
+	}
+	return streaming.NopCloser(bytes.NewReader(data)), data
+}
+
+// make sure you create the filesystem before running this example
+func Example_file_UploadFileAndDownloadStream() {
+	accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")
+
+	// Create a file client
+	u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName)
+	credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey)
+	handleError(err)
+	fClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil)
+	handleError(err)
+	var fileSize int64 = 10 * 1024
+
+	_, err = fClient.Create(context.Background(), nil)
+	handleError(err)
+
+	// create local file
+	_, content := generateData(int(fileSize))
+	err = os.WriteFile("testFile", content, 0644)
+	handleError(err)
+
+	defer func() {
+		err = os.Remove("testFile")
+		handleError(err)
+	}()
+
+	fh, err := os.Open("testFile")
+	handleError(err)
+
+	defer func(fh *os.File) { 
+ err := fh.Close() + handleError(err) + }(fh) + + // get md5 hash to compare against after download + hash := md5.New() + _, err = io.Copy(hash, fh) + handleError(err) + contentMD5 := hash.Sum(nil) + + // upload the file + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + handleError(err) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + handleError(err) + + data, err := io.ReadAll(dResp.Body) + handleError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + // compare the hashes + fmt.Println(downloadedContentMD5, contentMD5) +} + +// make sure you create the filesystem before running this example +func Example_file_UploadBufferAndDownloadStream() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + var fileSize int64 = 10 * 1024 + + _, content := generateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + handleError(err) + gResp2, err := fClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println(*gResp2.ContentLength, fileSize) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + handleError(err) + + data, err := io.ReadAll(dResp.Body) + handleError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := 
downloadedMD5Value[:] + + fmt.Println(downloadedContentMD5, contentMD5) +} + +// make sure you create the filesystem before running this example +func Example_file_AppendAndFlushDataWithValidation() { + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Create a file client + u := fmt.Sprintf("https://%s.dfs.core.windows.net/fs/file.txt", accountName) + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fClient, err := file.NewClientWithSharedKeyCredential(u, credential, nil) + handleError(err) + + contentSize := 1024 * 8 // 8KB + content := make([]byte, contentSize) + body := bytes.NewReader(content) + rsc := streaming.NopCloser(body) + contentCRC64 := crc64.Checksum(content, shared.CRC64Table) + + // validate data using crc64 + opts := &file.AppendDataOptions{ + TransactionalValidation: file.TransferValidationTypeComputeCRC64(), + } + putResp, err := fClient.AppendData(context.Background(), 0, rsc, opts) + handleError(err) + fmt.Println(putResp.ContentCRC64) + fmt.Println(binary.LittleEndian.Uint64(putResp.ContentCRC64), contentCRC64) + + // after appending data, flush it + _, err = fClient.FlushData(context.Background(), int64(contentSize), nil) + handleError(err) + + // compare content length as well + gResp2, err := fClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println(*gResp2.ContentLength, int64(contentSize)) +} diff --git a/sdk/storage/azdatalake/file/mmf_unix.go b/sdk/storage/azdatalake/file/mmf_unix.go new file mode 100644 index 000000000000..4c8ed223dbae --- /dev/null +++ b/sdk/storage/azdatalake/file/mmf_unix.go @@ -0,0 +1,38 @@ +//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix) +// +build go1.18 +// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package file + +import ( + "fmt" + "os" + "syscall" +) + +// mmb is a memory mapped buffer +type mmb []byte + +// newMMB creates a new memory mapped buffer with the specified size +func newMMB(size int64) (mmb, error) { + prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE + addr, err := syscall.Mmap(-1, 0, int(size), prot, flags) + if err != nil { + return nil, os.NewSyscallError("Mmap", err) + } + return mmb(addr), nil +} + +// delete cleans up the memory mapped buffer +func (m *mmb) delete() { + err := syscall.Munmap(*m) + *m = nil + if err != nil { + // if we get here, there is likely memory corruption. + // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("Munmap error: %v", err)) + } +} diff --git a/sdk/storage/azdatalake/file/mmf_windows.go b/sdk/storage/azdatalake/file/mmf_windows.go new file mode 100644 index 000000000000..b59e6b415776 --- /dev/null +++ b/sdk/storage/azdatalake/file/mmf_windows.go @@ -0,0 +1,56 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package file + +import ( + "fmt" + "os" + "reflect" + "syscall" + "unsafe" +) + +// mmb is a memory mapped buffer +type mmb []byte + +// newMMB creates a new memory mapped buffer with the specified size +func newMMB(size int64) (mmb, error) { + const InvalidHandleValue = ^uintptr(0) // -1 + + prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) + hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil) + if err != nil { + return nil, os.NewSyscallError("CreateFileMapping", err) + } + defer func() { + _ = syscall.CloseHandle(hMMF) + }() + + addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size)) + if err != nil { + return nil, os.NewSyscallError("MapViewOfFile", err) + } + + m := mmb{} + h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) + h.Data = addr + h.Len = int(size) + h.Cap = h.Len + return m, nil +} + +// delete cleans up the memory mapped buffer +func (m *mmb) delete() { + addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) + *m = mmb{} + err := syscall.UnmapViewOfFile(addr) + if err != nil { + // if we get here, there is likely memory corruption. + // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("UnmapViewOfFile error: %v", err)) + } +} diff --git a/sdk/storage/azdatalake/file/models.go b/sdk/storage/azdatalake/file/models.go new file mode 100644 index 000000000000..aa4852e7c9e8 --- /dev/null +++ b/sdk/storage/azdatalake/file/models.go @@ -0,0 +1,573 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+
+package file
+
+import (
+	"errors"
+	"io"
+	"reflect"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared"
+)
+
+const (
+	_1MiB      = 1024 * 1024
+	CountToEnd = 0
+
+	// MaxAppendBytes indicates the maximum number of bytes that can be updated in a call to Client.AppendData.
+	MaxAppendBytes = 100 * 1024 * 1024 // 100 MiB
+
+	// MaxFileSize indicates the maximum size of the file allowed.
+	MaxFileSize = 4 * 1024 * 1024 * 1024 * 1024 // 4 TiB
+)
+
+// CreateOptions contains the optional parameters when calling the Create operation.
+type CreateOptions struct {
+	// AccessConditions contains parameters for accessing the file.
+	AccessConditions *AccessConditions
+	// CPKInfo contains a group of parameters for client provided encryption key.
+	CPKInfo *CPKInfo
+	// HTTPHeaders contains the HTTP headers for path operations.
+	HTTPHeaders *HTTPHeaders
+	// Expiry specifies the type and time of expiry for the file.
+	Expiry CreateExpiryValues
+	// LeaseDuration specifies the duration of the lease, in seconds, or negative one
+	// (-1) for a lease that never expires. A non-infinite lease can be
+	// between 15 and 60 seconds.
+	LeaseDuration *int64
+	// ProposedLeaseID specifies the proposed lease ID for the file.
+	ProposedLeaseID *string
+	// Permissions is the octal representation of the permissions for user, group and mask.
+	Permissions *string
+	// Umask is the umask for the file.
+	Umask *string
+	// Owner is the owner of the file.
+	Owner *string
+	// Group is the owning group of the file.
+	Group *string
+	// ACL is the access control list for the file.
+	ACL *string
+}
+
+// CreateExpiryValues describes when a newly created file should expire. 
+// A zero-value indicates the file has no expiration date. +type CreateExpiryValues struct { + // ExpiryType indicates how the value of ExpiresOn should be interpreted (absolute, relative to now, etc). + ExpiryType CreateExpiryType + + // ExpiresOn contains the time the file should expire. + // The value will either be an absolute UTC time in RFC1123 format or an integer expressing a number of milliseconds. + // NOTE: when ExpiryType is CreateExpiryTypeNeverExpire, this value is ignored. + ExpiresOn string +} + +func (o *CreateOptions) format() (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, *generated.PathHTTPHeaders, *generated.PathClientCreateOptions, *generated.CPKInfo) { + resource := generated.PathResourceTypeFile + createOpts := &generated.PathClientCreateOptions{ + Resource: &resource, + } + if o == nil { + return nil, nil, nil, createOpts, nil + } + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) + if !reflect.ValueOf(o.Expiry).IsZero() { + createOpts.ExpiryOptions = &o.Expiry.ExpiryType + if o.Expiry.ExpiryType != CreateExpiryTypeNeverExpire { + createOpts.ExpiresOn = &o.Expiry.ExpiresOn + } + } + createOpts.ACL = o.ACL + createOpts.Group = o.Group + createOpts.Owner = o.Owner + createOpts.Umask = o.Umask + createOpts.Permissions = o.Permissions + createOpts.ProposedLeaseID = o.ProposedLeaseID + createOpts.LeaseDuration = o.LeaseDuration + + var httpHeaders *generated.PathHTTPHeaders + var cpkOpts *generated.CPKInfo + + if o.HTTPHeaders != nil { + httpHeaders = path.FormatPathHTTPHeaders(o.HTTPHeaders) + } + if o.CPKInfo != nil { + cpkOpts = &generated.CPKInfo{ + EncryptionAlgorithm: o.CPKInfo.EncryptionAlgorithm, + EncryptionKey: o.CPKInfo.EncryptionKey, + EncryptionKeySHA256: o.CPKInfo.EncryptionKeySHA256, + } + } + return leaseAccessConditions, modifiedAccessConditions, httpHeaders, createOpts, cpkOpts +} + +// UpdateAccessControlOptions contains the optional parameters 
when calling the UpdateAccessControlRecursive operation. +type UpdateAccessControlOptions struct { + //placeholder +} + +func (o *UpdateAccessControlOptions) format(ACL string) (*generated.PathClientSetAccessControlRecursiveOptions, generated.PathSetAccessControlRecursiveMode) { + mode := generated.PathSetAccessControlRecursiveModeModify + return &generated.PathClientSetAccessControlRecursiveOptions{ + ACL: &ACL, + }, mode +} + +// RemoveAccessControlOptions contains the optional parameters when calling the RemoveAccessControlRecursive operation. +type RemoveAccessControlOptions struct { + //placeholder +} + +func (o *RemoveAccessControlOptions) format(ACL string) (*generated.PathClientSetAccessControlRecursiveOptions, generated.PathSetAccessControlRecursiveMode) { + mode := generated.PathSetAccessControlRecursiveModeRemove + return &generated.PathClientSetAccessControlRecursiveOptions{ + ACL: &ACL, + }, mode +} + +// ===================================== PATH IMPORTS =========================================== + +// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions. +type uploadFromReaderOptions struct { + // ChunkSize specifies the chunk size to use in bytes; the default (and maximum size) is MaxAppendBytes. + ChunkSize int64 + // Progress is a function that is invoked periodically as bytes are sent to the FileClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + // Concurrency indicates the maximum number of chunks to upload in parallel (default is 5) + Concurrency uint16 + // AccessConditions contains optional parameters to access leased entity. + AccessConditions *AccessConditions + // HTTPHeaders contains the optional path HTTP headers to set when the file is created. + HTTPHeaders *HTTPHeaders + // CPKInfo contains optional parameters to perform encryption using customer-provided key. 
+ CPKInfo *CPKInfo +} + +// UploadStreamOptions provides set of configurations for Client.UploadStream operation. +type UploadStreamOptions struct { + // ChunkSize specifies the chunk size to use in bytes; the default (and maximum size) is MaxAppendBytes. + ChunkSize int64 + // Concurrency indicates the maximum number of chunks to upload in parallel (default is 5) + Concurrency uint16 + // AccessConditions contains optional parameters to access leased entity. + AccessConditions *AccessConditions + // HTTPHeaders contains the optional path HTTP headers to set when the file is created. + HTTPHeaders *HTTPHeaders + // CPKInfo contains optional parameters to perform encryption using customer-provided key. + CPKInfo *CPKInfo +} + +// UploadBufferOptions provides set of configurations for Client.UploadBuffer operation. +type UploadBufferOptions = uploadFromReaderOptions + +// UploadFileOptions provides set of configurations for Client.UploadFile operation. +type UploadFileOptions = uploadFromReaderOptions + +// FlushDataOptions contains the optional parameters for the Client.FlushData method. +type FlushDataOptions struct { + // AccessConditions contains parameters for accessing the file. + AccessConditions *AccessConditions + // CPKInfo contains optional parameters to perform encryption using customer-provided key. + CPKInfo *CPKInfo + // HTTPHeaders contains the HTTP headers for path operations. + HTTPHeaders *HTTPHeaders + // Close This event has a property indicating whether this is the final change to distinguish the + // difference between an intermediate flush to a file stream and the final close of a file stream. + Close *bool + // RetainUncommittedData if "true", uncommitted data is retained after the flush operation + // completes, otherwise, the uncommitted data is deleted after the flush operation. 
+ RetainUncommittedData *bool +} + +func (o *FlushDataOptions) format(offset int64) (*generated.PathClientFlushDataOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.PathHTTPHeaders, *generated.CPKInfo, error) { + defaultRetainUncommitted := false + defaultClose := false + contentLength := int64(0) + + var httpHeaderOpts *generated.PathHTTPHeaders + var leaseAccessConditions *generated.LeaseAccessConditions + var modifiedAccessConditions *generated.ModifiedAccessConditions + var cpkInfoOpts *generated.CPKInfo + flushDataOpts := &generated.PathClientFlushDataOptions{ContentLength: &contentLength, Position: &offset} + + if o == nil { + flushDataOpts.RetainUncommittedData = &defaultRetainUncommitted + flushDataOpts.Close = &defaultClose + return flushDataOpts, nil, nil, nil, nil, nil + } + + if o != nil { + if o.RetainUncommittedData == nil { + flushDataOpts.RetainUncommittedData = &defaultRetainUncommitted + } else { + flushDataOpts.RetainUncommittedData = o.RetainUncommittedData + } + if o.Close == nil { + flushDataOpts.Close = &defaultClose + } else { + flushDataOpts.Close = o.Close + } + leaseAccessConditions, modifiedAccessConditions = exported.FormatPathAccessConditions(o.AccessConditions) + if o.HTTPHeaders != nil { + httpHeaderOpts = &generated.PathHTTPHeaders{} + httpHeaderOpts.ContentMD5 = o.HTTPHeaders.ContentMD5 + httpHeaderOpts.ContentType = o.HTTPHeaders.ContentType + httpHeaderOpts.CacheControl = o.HTTPHeaders.CacheControl + httpHeaderOpts.ContentDisposition = o.HTTPHeaders.ContentDisposition + httpHeaderOpts.ContentEncoding = o.HTTPHeaders.ContentEncoding + } + if o.CPKInfo != nil { + cpkInfoOpts = &generated.CPKInfo{} + cpkInfoOpts.EncryptionKey = o.CPKInfo.EncryptionKey + cpkInfoOpts.EncryptionKeySHA256 = o.CPKInfo.EncryptionKeySHA256 + cpkInfoOpts.EncryptionAlgorithm = o.CPKInfo.EncryptionAlgorithm + } + } + return flushDataOpts, modifiedAccessConditions, leaseAccessConditions, httpHeaderOpts, cpkInfoOpts, nil 
+} + +// AppendDataOptions contains the optional parameters for the Client.AppendData method. +type AppendDataOptions struct { + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation TransferValidationType + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions + // CPKInfo contains optional parameters to perform encryption using customer-provided key. + CPKInfo *CPKInfo +} + +func (o *AppendDataOptions) format(offset int64, body io.ReadSeekCloser) (*generated.PathClientAppendDataOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, error) { + if offset < 0 || body == nil { + return nil, nil, nil, errors.New("invalid argument: offset must be >= 0 and body must not be nil") + } + + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return nil, nil, nil, err + } + + if count == 0 { + return nil, nil, nil, errors.New("invalid argument: body must contain readable data whose size is > 0") + } + + appendDataOptions := &generated.PathClientAppendDataOptions{} + httpRange := exported.FormatHTTPRange(HTTPRange{ + Offset: offset, + Count: count, + }) + if httpRange != nil { + appendDataOptions.Position = &offset + appendDataOptions.ContentLength = &count + } + + var leaseAccessConditions *LeaseAccessConditions + var cpkInfoOpts *generated.CPKInfo + + if o != nil { + leaseAccessConditions = o.LeaseAccessConditions + if o.CPKInfo != nil { + cpkInfoOpts = &generated.CPKInfo{} + cpkInfoOpts.EncryptionKey = o.CPKInfo.EncryptionKey + cpkInfoOpts.EncryptionKeySHA256 = o.CPKInfo.EncryptionKeySHA256 + cpkInfoOpts.EncryptionAlgorithm = o.CPKInfo.EncryptionAlgorithm + } + } + if o != nil && o.TransactionalValidation != nil { + _, err = o.TransactionalValidation.Apply(body, appendDataOptions) + if err != nil { + return nil, nil, nil, err + } + } + + return appendDataOptions, 
leaseAccessConditions, cpkInfoOpts, nil +} + +func (u *UploadStreamOptions) setDefaults() { + if u.Concurrency == 0 { + u.Concurrency = 1 + } + + if u.ChunkSize < _1MiB { + u.ChunkSize = _1MiB + } +} + +func (u *uploadFromReaderOptions) getAppendDataOptions() *AppendDataOptions { + if u == nil { + return nil + } + leaseAccessConditions, _ := exported.FormatPathAccessConditions(u.AccessConditions) + return &AppendDataOptions{ + LeaseAccessConditions: leaseAccessConditions, + CPKInfo: u.CPKInfo, + } +} + +func (u *uploadFromReaderOptions) getFlushDataOptions() *FlushDataOptions { + if u == nil { + return nil + } + return &FlushDataOptions{ + AccessConditions: u.AccessConditions, + HTTPHeaders: u.HTTPHeaders, + CPKInfo: u.CPKInfo, + } +} + +func (u *UploadStreamOptions) getAppendDataOptions() *AppendDataOptions { + if u == nil { + return nil + } + leaseAccessConditions, _ := exported.FormatPathAccessConditions(u.AccessConditions) + return &AppendDataOptions{ + LeaseAccessConditions: leaseAccessConditions, + CPKInfo: u.CPKInfo, + } +} + +func (u *UploadStreamOptions) getFlushDataOptions() *FlushDataOptions { + if u == nil { + return nil + } + return &FlushDataOptions{ + AccessConditions: u.AccessConditions, + HTTPHeaders: u.HTTPHeaders, + CPKInfo: u.CPKInfo, + } +} + +// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method. +type DownloadStreamOptions struct { + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // Range specifies a range of bytes. The default value is all bytes. + Range *HTTPRange + // AccessConditions contains parameters for accessing the file. + AccessConditions *AccessConditions + // CPKInfo contains optional parameters to perform encryption using customer-provided key. 
+ CPKInfo *CPKInfo + // CPKScopeInfo contains a group of parameters for client provided encryption scope. + CPKScopeInfo *CPKScopeInfo +} + +func (o *DownloadStreamOptions) format() *blob.DownloadStreamOptions { + if o == nil { + return nil + } + + downloadStreamOptions := &blob.DownloadStreamOptions{} + if o.Range != nil { + downloadStreamOptions.Range = *o.Range + } + if o.CPKInfo != nil { + downloadStreamOptions.CPKInfo = &blob.CPKInfo{ + EncryptionKey: o.CPKInfo.EncryptionKey, + EncryptionKeySHA256: o.CPKInfo.EncryptionKeySHA256, + EncryptionAlgorithm: (*blob.EncryptionAlgorithmType)(o.CPKInfo.EncryptionAlgorithm), + } + } + + downloadStreamOptions.RangeGetContentMD5 = o.RangeGetContentMD5 + downloadStreamOptions.AccessConditions = exported.FormatBlobAccessConditions(o.AccessConditions) + downloadStreamOptions.CPKScopeInfo = o.CPKScopeInfo + return downloadStreamOptions +} + +// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method. +type DownloadBufferOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range *HTTPRange + // ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB. + ChunkSize int64 + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + // AccessConditions indicates the access conditions used when making HTTP GET requests against the file. + AccessConditions *AccessConditions + // CPKInfo contains a group of parameters for client provided encryption key. + CPKInfo *CPKInfo + // CPKScopeInfo contains a group of parameters for client provided encryption scope. + CPKScopeInfo *CPKScopeInfo + // Concurrency indicates the maximum number of chunks to download in parallel (0=default). + Concurrency uint16 + // RetryReaderOptionsPerChunk is used when downloading each chunk. 
+ RetryReaderOptionsPerChunk *RetryReaderOptions +} + +func (o *DownloadBufferOptions) format() *blob.DownloadBufferOptions { + if o == nil { + return nil + } + + downloadBufferOptions := &blob.DownloadBufferOptions{} + if o.Range != nil { + downloadBufferOptions.Range = blob.HTTPRange{ + Offset: o.Range.Offset, + Count: o.Range.Count, + } + } + if o.CPKInfo != nil { + downloadBufferOptions.CPKInfo = &blob.CPKInfo{ + EncryptionKey: o.CPKInfo.EncryptionKey, + EncryptionKeySHA256: o.CPKInfo.EncryptionKeySHA256, + EncryptionAlgorithm: (*blob.EncryptionAlgorithmType)(o.CPKInfo.EncryptionAlgorithm), + } + } + + downloadBufferOptions.AccessConditions = exported.FormatBlobAccessConditions(o.AccessConditions) + downloadBufferOptions.CPKScopeInfo = o.CPKScopeInfo + downloadBufferOptions.BlockSize = o.ChunkSize + downloadBufferOptions.Progress = o.Progress + downloadBufferOptions.Concurrency = o.Concurrency + if o.RetryReaderOptionsPerChunk != nil { + downloadBufferOptions.RetryReaderOptionsPerBlock.OnFailedRead = o.RetryReaderOptionsPerChunk.OnFailedRead + downloadBufferOptions.RetryReaderOptionsPerBlock.EarlyCloseAsError = o.RetryReaderOptionsPerChunk.EarlyCloseAsError + downloadBufferOptions.RetryReaderOptionsPerBlock.MaxRetries = o.RetryReaderOptionsPerChunk.MaxRetries + } + + return downloadBufferOptions +} + +// DownloadFileOptions contains the optional parameters for the Client.DownloadFile method. +type DownloadFileOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range *HTTPRange + // ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB. + ChunkSize int64 + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + // AccessConditions indicates the access conditions used when making HTTP GET requests against the file. 
+ AccessConditions *AccessConditions + // CPKInfo contains a group of parameters for client provided encryption key. + CPKInfo *CPKInfo + // CPKScopeInfo contains a group of parameters for client provided encryption scope. + CPKScopeInfo *CPKScopeInfo + // Concurrency indicates the maximum number of chunks to download in parallel. The default value is 5. + Concurrency uint16 + // RetryReaderOptionsPerChunk is used when downloading each chunk. + RetryReaderOptionsPerChunk *RetryReaderOptions +} + +func (o *DownloadFileOptions) format() *blob.DownloadFileOptions { + if o == nil { + return nil + } + + downloadFileOptions := &blob.DownloadFileOptions{} + if o.Range != nil { + downloadFileOptions.Range = blob.HTTPRange{ + Offset: o.Range.Offset, + Count: o.Range.Count, + } + } + if o.CPKInfo != nil { + downloadFileOptions.CPKInfo = &blob.CPKInfo{ + EncryptionKey: o.CPKInfo.EncryptionKey, + EncryptionKeySHA256: o.CPKInfo.EncryptionKeySHA256, + EncryptionAlgorithm: (*blob.EncryptionAlgorithmType)(o.CPKInfo.EncryptionAlgorithm), + } + } + + downloadFileOptions.AccessConditions = exported.FormatBlobAccessConditions(o.AccessConditions) + downloadFileOptions.CPKScopeInfo = o.CPKScopeInfo + downloadFileOptions.BlockSize = o.ChunkSize + downloadFileOptions.Progress = o.Progress + downloadFileOptions.Concurrency = o.Concurrency + if o.RetryReaderOptionsPerChunk != nil { + downloadFileOptions.RetryReaderOptionsPerBlock.OnFailedRead = o.RetryReaderOptionsPerChunk.OnFailedRead + downloadFileOptions.RetryReaderOptionsPerBlock.EarlyCloseAsError = o.RetryReaderOptionsPerChunk.EarlyCloseAsError + downloadFileOptions.RetryReaderOptionsPerBlock.MaxRetries = o.RetryReaderOptionsPerChunk.MaxRetries + } + + return downloadFileOptions +} + +// SetExpiryValues describes when a file should expire. +// A zero-value indicates the file has no expiration date. +type SetExpiryValues struct { + // ExpiryType indicates how the value of ExpiresOn should be interpreted (absolute, relative to now, etc). 
+ ExpiryType SetExpiryType + + // ExpiresOn contains the time the file should expire. + // The value will either be an absolute UTC time in RFC1123 format or an integer expressing a number of milliseconds. + // NOTE: when ExpiryType is SetExpiryTypeNeverExpire, this value is ignored. + ExpiresOn string +} + +// ACLFailedEntry contains the failed ACL entry (response model). +type ACLFailedEntry = path.ACLFailedEntry + +// SetAccessControlRecursiveResponse contains part of the response data returned by the []OP_AccessControl operations. +type SetAccessControlRecursiveResponse = generated.SetAccessControlRecursiveResponse + +// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. +type SetExpiryOptions struct { + // placeholder for future options +} + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange + +// ================================= path imports ================================== + +// DeleteOptions contains the optional parameters when calling the Delete operation. +type DeleteOptions = path.DeleteOptions + +// RenameOptions contains the optional parameters when calling the Rename operation. +type RenameOptions = path.RenameOptions + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method +type GetPropertiesOptions = path.GetPropertiesOptions + +// SetAccessControlOptions contains the optional parameters when calling the SetAccessControl operation. +type SetAccessControlOptions = path.SetAccessControlOptions + +// GetAccessControlOptions contains the optional parameters when calling the GetAccessControl operation. +type GetAccessControlOptions = path.GetAccessControlOptions + +// CPKInfo contains CPK related information. 
+type CPKInfo = path.CPKInfo + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions = path.GetSASURLOptions + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. +type SetHTTPHeadersOptions = path.SetHTTPHeadersOptions + +// HTTPHeaders contains the HTTP headers for path operations. +type HTTPHeaders = path.HTTPHeaders + +// SetMetadataOptions provides set of configurations for Set Metadata on path operation +type SetMetadataOptions = path.SetMetadataOptions + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = path.SharedKeyCredential + +// AccessConditions identifies file-specific access conditions which you optionally set. +type AccessConditions = path.AccessConditions + +// SourceAccessConditions identifies file-specific source access conditions which you optionally set. +type SourceAccessConditions = path.SourceAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = path.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = path.ModifiedAccessConditions + +// SourceModifiedAccessConditions contains a group of parameters for specifying access conditions. +type SourceModifiedAccessConditions = path.SourceModifiedAccessConditions + +// CPKScopeInfo contains a group of parameters for the PathClient.SetMetadata method. +type CPKScopeInfo = path.CPKScopeInfo diff --git a/sdk/storage/azdatalake/file/responses.go b/sdk/storage/azdatalake/file/responses.go new file mode 100644 index 000000000000..f72747584ab7 --- /dev/null +++ b/sdk/storage/azdatalake/file/responses.go @@ -0,0 +1,271 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package file + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path" + "io" + "time" +) + +// SetExpiryResponse contains the response fields for the SetExpiry operation. +type SetExpiryResponse = generated_blob.BlobClientSetExpiryResponse + +// AppendDataResponse contains the response from method Client.AppendData. +type AppendDataResponse = generated.PathClientAppendDataResponse + +// FlushDataResponse contains the response from method Client.FlushData. +type FlushDataResponse = generated.PathClientFlushDataResponse + +// RenameResponse contains the response fields for the Rename operation. +type RenameResponse = path.RenameResponse + +// DownloadStreamResponse contains the response from the DownloadStream method. +// To read from the stream, read from the Body field, or call the NewRetryReader method. +type DownloadStreamResponse struct { + // DownloadResponse contains response fields from DownloadStream. + DownloadResponse + client *Client + getInfo httpGetterInfo + cpkInfo *CPKInfo + cpkScope *CPKScopeInfo +} + +// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while +// reading, it will make additional requests to reestablish a connection and continue reading. +// Pass nil for options to accept the default options. +// Callers of this method should not access the DownloadStreamResponse.Body field. 
+func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { + if options == nil { + options = &RetryReaderOptions{} + } + + return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) { + accessConditions := &AccessConditions{ + ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag}, + } + options := DownloadStreamOptions{ + Range: getInfo.Range, + AccessConditions: accessConditions, + CPKInfo: r.cpkInfo, + CPKScopeInfo: r.cpkScope, + } + resp, err := r.client.DownloadStream(ctx, &options) + if err != nil { + return nil, err + } + return resp.Body, err + }, *options) +} + +// DownloadResponse contains the response fields for the GetProperties operation. +type DownloadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ErrorCode contains the information returned from the x-ms-error-code header response. + ErrorCode *string + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. 
+ ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *DurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *StateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *StatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. 
+ VersionID *string +} + +func FormatDownloadStreamResponse(r *blob.DownloadStreamResponse) DownloadResponse { + newResp := DownloadResponse{} + if r != nil { + newResp.AcceptRanges = r.AcceptRanges + newResp.Body = r.Body + newResp.ContentCRC64 = r.ContentCRC64 + newResp.ContentRange = r.ContentRange + newResp.CacheControl = r.CacheControl + newResp.ErrorCode = r.ErrorCode + newResp.ClientRequestID = r.ClientRequestID + newResp.ContentDisposition = r.ContentDisposition + newResp.ContentEncoding = r.ContentEncoding + newResp.ContentLanguage = r.ContentLanguage + newResp.ContentLength = r.ContentLength + newResp.ContentMD5 = r.ContentMD5 + newResp.ContentType = r.ContentType + newResp.CopyCompletionTime = r.CopyCompletionTime + newResp.CopyID = r.CopyID + newResp.CopyProgress = r.CopyProgress + newResp.CopySource = r.CopySource + newResp.CopyStatus = r.CopyStatus + newResp.CopyStatusDescription = r.CopyStatusDescription + newResp.Date = r.Date + newResp.ETag = r.ETag + newResp.EncryptionKeySHA256 = r.EncryptionKeySHA256 + newResp.EncryptionScope = r.EncryptionScope + newResp.ImmutabilityPolicyExpiresOn = r.ImmutabilityPolicyExpiresOn + newResp.ImmutabilityPolicyMode = r.ImmutabilityPolicyMode + newResp.IsCurrentVersion = r.IsCurrentVersion + newResp.IsSealed = r.IsSealed + newResp.IsServerEncrypted = r.IsServerEncrypted + newResp.LastAccessed = r.LastAccessed + newResp.LastModified = r.LastModified + newResp.LeaseDuration = r.LeaseDuration + newResp.LeaseState = r.LeaseState + newResp.LeaseStatus = r.LeaseStatus + newResp.LegalHold = r.LegalHold + newResp.Metadata = r.Metadata + newResp.ObjectReplicationPolicyID = r.ObjectReplicationPolicyID + newResp.ObjectReplicationRules = r.DownloadResponse.ObjectReplicationRules + newResp.RequestID = r.RequestID + newResp.TagCount = r.TagCount + newResp.Version = r.Version + newResp.VersionID = r.VersionID + } + return newResp +} + +// ========================================== path imports 
=========================================================== + +// SetAccessControlResponse contains the response fields for the SetAccessControl operation. +type SetAccessControlResponse = path.SetAccessControlResponse + +// SetHTTPHeadersResponse contains the response from method Client.SetHTTPHeaders. +type SetHTTPHeadersResponse = path.SetHTTPHeadersResponse + +// GetAccessControlResponse contains the response fields for the GetAccessControl operation. +type GetAccessControlResponse = path.GetAccessControlResponse + +// GetPropertiesResponse contains the response fields for the GetProperties operation. +type GetPropertiesResponse = path.GetPropertiesResponse + +// SetMetadataResponse contains the response fields for the SetMetadata operation. +type SetMetadataResponse = path.SetMetadataResponse + +// CreateResponse contains the response fields for the Create operation. +type CreateResponse = path.CreateResponse + +// DeleteResponse contains the response fields for the Delete operation. +type DeleteResponse = path.DeleteResponse + +// UpdateAccessControlResponse contains the response fields for the UpdateAccessControlRecursive operation. +type UpdateAccessControlResponse = path.UpdateAccessControlResponse + +// RemoveAccessControlResponse contains the response fields for the RemoveAccessControlRecursive operation. +type RemoveAccessControlResponse = path.RemoveAccessControlResponse diff --git a/sdk/storage/azdatalake/file/retry_reader.go b/sdk/storage/azdatalake/file/retry_reader.go new file mode 100644 index 000000000000..139278e54b85 --- /dev/null +++ b/sdk/storage/azdatalake/file/retry_reader.go @@ -0,0 +1,192 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+
+package file
+
+import (
+	"context"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"io"
+	"net"
+	"strings"
+	"sync"
+)
+
+// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
+type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error)
+
+// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
+// that should be used to make an HTTP GET request.
+type httpGetterInfo struct {
+	// HTTPRange contains range
+	Range *HTTPRange
+
+	// ETag specifies the resource's etag that should be used when creating
+	// the HTTP GET request's If-Match header
+	ETag *azcore.ETag
+}
+
+// RetryReaderOptions configures the retry reader's behavior.
+// Zero-value fields will have their specified default values applied during use.
+// This allows for modification of a subset of fields.
+type RetryReaderOptions struct {
+	// MaxRetries specifies the maximum number of attempts a failed read will be retried
+	// before producing an error.
+	// The default value is three.
+	MaxRetries int32
+
+	// OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging.
+	OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool)
+
+	// EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
+	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
+	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
+	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
+	// EarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
+	// treated as a fatal (non-retryable) error.
+	// Note that setting EarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
+	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
+	// which will be retried.
+	// The default value is false.
+	EarlyCloseAsError bool
+
+	doInjectError      bool
+	doInjectErrorRound int32
+	injectedError      error
+}
+
+// RetryReader attempts to read from response, and if there is a retry-able network error
+// returned during reading, it will retry according to retry reader option through executing
+// user defined action with provided data to get a new response, and continue the overall reading process
+// through reading from the new response.
+// RetryReader implements the io.ReadCloser interface.
+type RetryReader struct {
+	ctx                context.Context
+	info               httpGetterInfo
+	retryReaderOptions RetryReaderOptions
+	getter             httpGetter
+	countWasBounded    bool
+
+	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
+	responseMu *sync.Mutex
+	response   io.ReadCloser
+}
+
+// newRetryReader creates a retry reader.
+func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader {
+	if o.MaxRetries < 1 {
+		o.MaxRetries = 3
+	}
+	return &RetryReader{
+		ctx:                ctx,
+		getter:             getter,
+		info:               info,
+		countWasBounded:    info.Range.Count != CountToEnd,
+		response:           initialResponse,
+		responseMu:         &sync.Mutex{},
+		retryReaderOptions: o,
+	}
+}
+
+// setResponse function
+func (s *RetryReader) setResponse(r io.ReadCloser) {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+	s.response = r
+}
+
+// Read from retry reader
+func (s *RetryReader) Read(p []byte) (n int, err error) {
+	for try := int32(0); ; try++ {
+		//fmt.Println(try)       // Comment out for debugging.
+ if s.countWasBounded && s.info.Range.Count == CountToEnd { + // User specified an original count and the remaining bytes are 0, return 0, EOF + return 0, io.EOF + } + + s.responseMu.Lock() + resp := s.response + s.responseMu.Unlock() + if resp == nil { // We don't have a response stream to read from, try to get one. + newResponse, err := s.getter(s.ctx, s.info) + if err != nil { + return 0, err + } + // Successful GET; this is the network stream we'll read from. + s.setResponse(newResponse) + resp = newResponse + } + n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) + + // Injection mechanism for testing. + if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound { + if s.retryReaderOptions.injectedError != nil { + err = s.retryReaderOptions.injectedError + } else { + err = &net.DNSError{IsTemporary: true} + } + } + + // We successfully read data or end EOF. + if err == nil || err == io.EOF { + s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Range.Count != CountToEnd { + s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + } + return n, err // Return the return to the caller + } + _ = s.Close() + + s.setResponse(nil) // Our stream is no longer good + + // Check the retry count and error code, and decide whether to retry. 
+		retriesExhausted := try >= s.retryReaderOptions.MaxRetries
+		_, isNetError := err.(net.Error)
+		isUnexpectedEOF := err == io.ErrUnexpectedEOF
+		willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
+
+		// Notify, for logging purposes, of any failures
+		if s.retryReaderOptions.OnFailedRead != nil {
+			failureCount := try + 1 // because try is zero-based
+			s.retryReaderOptions.OnFailedRead(failureCount, err, *s.info.Range, willRetry)
+		}
+
+		if willRetry {
+			continue
+			// Loop around and try to get and read from new stream.
+		}
+		return n, err // Not retryable, or retries exhausted, so just return
+	}
+}
+
+// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
+// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
+// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
+// which is exactly the behaviour we want.
+// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read)
+// then there are two different types of error that may happen - either the one we check for here,
+// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
+// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
+func (s *RetryReader) wasRetryableEarlyClose(err error) bool {
+	if s.retryReaderOptions.EarlyCloseAsError {
+		return false // user wants all early closes to be errors, and so not retryable
+	}
+	// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
+	return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
+}
+
+// ReadOnClosedBodyMessage of retry reader
+const ReadOnClosedBodyMessage = "read on closed response body"
+
+// Close retry reader
+func (s *RetryReader) Close() error {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+	if s.response != nil {
+		return s.response.Close()
+	}
+	return nil
+}
diff --git a/sdk/storage/azdatalake/file/retry_reader_test.go b/sdk/storage/azdatalake/file/retry_reader_test.go
new file mode 100644
index 000000000000..61013cd1de46
--- /dev/null
+++ b/sdk/storage/azdatalake/file/retry_reader_test.go
@@ -0,0 +1,433 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package file
+
+import (
+	"context"
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+// Tests for RetryReader
+// This reader returns one byte through each Read call
+type perByteReader struct {
+	RandomBytes []byte // Random generated bytes
+
+	byteCount        int // Bytes can be returned before EOF
+	currentByteIndex int // Bytes that have already been returned.
+ doInjectError bool + doInjectErrorByteIndex int + doInjectTimes int + injectedError error + + // sleepDuration and closeChannel are only use in "forced cancellation" tests + sleepDuration time.Duration + closeChannel chan struct{} +} + +func newPerByteReader(byteCount int) *perByteReader { + perByteReader := perByteReader{ + byteCount: byteCount, + closeChannel: nil, + } + + perByteReader.RandomBytes = make([]byte, byteCount) + _, _ = rand.Read(perByteReader.RandomBytes) + + return &perByteReader +} + +func newSingleUsePerByteReader(contents []byte) *perByteReader { + perByteReader := perByteReader{ + byteCount: len(contents), + closeChannel: make(chan struct{}, 10), + } + + perByteReader.RandomBytes = contents + + return &perByteReader +} + +func (r *perByteReader) Read(b []byte) (n int, err error) { + if r.doInjectError && r.doInjectErrorByteIndex == r.currentByteIndex && r.doInjectTimes > 0 { + r.doInjectTimes-- + return 0, r.injectedError + } + + if r.currentByteIndex < r.byteCount { + n = copy(b, r.RandomBytes[r.currentByteIndex:r.currentByteIndex+1]) + r.currentByteIndex += n + + // simulate a delay, which may be successful or, if we're closed from another go-routine, may return an + // error + select { + case <-r.closeChannel: + return n, errors.New(ReadOnClosedBodyMessage) + case <-time.After(r.sleepDuration): + return n, nil + } + } + + return 0, io.EOF +} + +func (r *perByteReader) Close() error { + if r.closeChannel != nil { + r.closeChannel <- struct{}{} + } + return nil +} + +// Test normal retry succeed, note initial response not provided. 
+// Tests both with and without notification of failures +func TestRetryReaderReadWithRetry(t *testing.T) { + // Test twice, the second time using the optional "logging"/notification callback for failed tries + // We must test both with and without the callback, since be testing without + // we are testing that it is, indeed, optional to provide the callback + for _, logThisRun := range []bool{false, true} { + + // Extra setup for testing notification of failures (i.e. of unsuccessful tries) + failureMethodNumCalls := 0 + failureWillRetryCount := 0 + failureLastReportedFailureCount := int32(-1) + var failureLastReportedError error = nil + failureMethod := func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) { + failureMethodNumCalls++ + if willRetry { + failureWillRetryCount++ + } + failureLastReportedFailureCount = failureCount + failureLastReportedError = lastError + } + + // Main test setup + byteCount := 1 + body := newPerByteReader(byteCount) + body.doInjectError = true + body.doInjectErrorByteIndex = 0 + body.doInjectTimes = 1 + body.injectedError = &net.DNSError{IsTemporary: true} + + getter := func(ctx context.Context, info httpGetterInfo) (io.ReadCloser, error) { + r := http.Response{} + body.currentByteIndex = int(info.Range.Offset) + r.Body = body + + return r.Body, nil + } + + httpGetterInfo := httpGetterInfo{ + Range: &HTTPRange{ + Count: int64(byteCount), + }, + } + initResponse, err := getter(context.Background(), httpGetterInfo) + require.NoError(t, err) + + rrOptions := RetryReaderOptions{MaxRetries: 1} + if logThisRun { + rrOptions.OnFailedRead = failureMethod + } + retryReader := newRetryReader(context.Background(), initResponse, httpGetterInfo, getter, rrOptions) + + // should fail and succeed through retry + can := make([]byte, 1) + n, err := retryReader.Read(can) + require.Equal(t, n, 1) + require.NoError(t, err) + + // check "logging", if it was enabled + if logThisRun { + // We only expect one failed try in this test + 
// And the notification method is not called for successes + require.Equal(t, failureMethodNumCalls, 1) // this is the number of calls we counted + require.Equal(t, failureWillRetryCount, 1) // the sole failure was retried + require.Equal(t, failureLastReportedFailureCount, int32(1)) // this is the number of failures reported by the notification method + require.Error(t, failureLastReportedError) + } + // should return EOF + n, err = retryReader.Read(can) + require.Equal(t, n, 0) + require.Equal(t, err, io.EOF) + } +} + +// Test normal retry succeed, note initial response not provided. +// Tests both with and without notification of failures +func TestRetryReaderWithRetryIoUnexpectedEOF(t *testing.T) { + // Test twice, the second time using the optional "logging"/notification callback for failed tries + // We must test both with and without the callback, since be testing without + // we are testing that it is, indeed, optional to provide the callback + for _, logThisRun := range []bool{false, true} { + + // Extra setup for testing notification of failures (i.e. 
of unsuccessful tries) + failureMethodNumCalls := 0 + failureWillRetryCount := 0 + failureLastReportedFailureCount := int32(-1) + var failureLastReportedError error = nil + failureMethod := func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) { + failureMethodNumCalls++ + if willRetry { + failureWillRetryCount++ + } + failureLastReportedFailureCount = failureCount + failureLastReportedError = lastError + } + + // Main test setup + byteCount := 1 + body := newPerByteReader(byteCount) + body.doInjectError = true + body.doInjectErrorByteIndex = 0 + body.doInjectTimes = 1 + body.injectedError = io.ErrUnexpectedEOF + + getter := func(ctx context.Context, info httpGetterInfo) (io.ReadCloser, error) { + r := http.Response{} + body.currentByteIndex = int(info.Range.Offset) + r.Body = body + + return r.Body, nil + } + + httpGetterInfo := httpGetterInfo{ + Range: &HTTPRange{ + Count: int64(byteCount), + }, + } + initResponse, err := getter(context.Background(), httpGetterInfo) + require.NoError(t, err) + + rrOptions := RetryReaderOptions{MaxRetries: 1} + if logThisRun { + rrOptions.OnFailedRead = failureMethod + } + retryReader := newRetryReader(context.Background(), initResponse, httpGetterInfo, getter, rrOptions) + + // should fail and succeed through retry + can := make([]byte, 1) + n, err := retryReader.Read(can) + require.Equal(t, n, 1) + require.NoError(t, err) + + // check "logging", if it was enabled + if logThisRun { + // We only expect one failed try in this test + // And the notification method is not called for successes + require.Equal(t, failureMethodNumCalls, 1) // this is the number of calls we counted + require.Equal(t, failureWillRetryCount, 1) // the sole failure was retried + require.Equal(t, failureLastReportedFailureCount, int32(1)) // this is the number of failures reported by the notification method + require.Error(t, failureLastReportedError) + } + // should return EOF + n, err = retryReader.Read(can) + require.Equal(t, n, 0) + 
require.Equal(t, err, io.EOF) + } +} + +// Test normal retry fail as retry Count not enough. +func TestRetryReaderReadNegativeNormalFail(t *testing.T) { + // Extra setup for testing notification of failures (i.e. of unsuccessful tries) + failureMethodNumCalls := 0 + failureWillRetryCount := 0 + failureLastReportedFailureCount := int32(-1) + var failureLastReportedError error = nil + failureMethod := func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) { + failureMethodNumCalls++ + if willRetry { + failureWillRetryCount++ + } + failureLastReportedFailureCount = failureCount + failureLastReportedError = lastError + } + + // Main test setup + byteCount := 1 + body := newPerByteReader(byteCount) + body.doInjectError = true + body.doInjectErrorByteIndex = 0 + body.doInjectTimes = 2 + body.injectedError = &net.DNSError{IsTemporary: true} + + startResponse := body + + getter := func(ctx context.Context, info httpGetterInfo) (io.ReadCloser, error) { + r := http.Response{} + body.currentByteIndex = int(info.Range.Offset) + r.Body = body + + return r.Body, nil + } + + rrOptions := RetryReaderOptions{ + MaxRetries: 1, + OnFailedRead: failureMethod, + } + httpGetterInfo := httpGetterInfo{ + Range: &HTTPRange{ + Count: int64(byteCount), + }, + } + retryReader := newRetryReader(context.Background(), startResponse, httpGetterInfo, getter, rrOptions) + + // should fail + can := make([]byte, 1) + n, err := retryReader.Read(can) + require.Equal(t, n, 0) + require.Equal(t, err, body.injectedError) + + // Check that we received the right notification callbacks + // We only expect two failed tries in this test, but only one + // of the would have had willRetry = true + require.Equal(t, failureMethodNumCalls, 2) // this is the number of calls we counted + require.Equal(t, failureWillRetryCount, 1) // only the first failure was retried + require.Equal(t, failureLastReportedFailureCount, int32(2)) // this is the number of failures reported by the notification method + 
require.Error(t, failureLastReportedError) +} + +// Test boundary case when Count equals to 0 and fail. +func TestRetryReaderReadCount0(t *testing.T) { + byteCount := 1 + body := newPerByteReader(byteCount) + body.doInjectError = true + body.doInjectErrorByteIndex = 1 + body.doInjectTimes = 1 + body.injectedError = &net.DNSError{IsTemporary: true} + + startResponseBody := body + + getter := func(ctx context.Context, info httpGetterInfo) (io.ReadCloser, error) { + r := http.Response{} + body.currentByteIndex = int(info.Range.Offset) + r.Body = body + + return r.Body, nil + } + + httpGetterInfo := httpGetterInfo{ + Range: &HTTPRange{ + Count: int64(byteCount), + }, + } + + retryReader := newRetryReader(context.Background(), startResponseBody, httpGetterInfo, getter, RetryReaderOptions{MaxRetries: 1}) + + // should consume the only byte + can := make([]byte, 1) + n, err := retryReader.Read(can) + require.Equal(t, n, 1) + require.NoError(t, err) + + // should not read when Count=0, and should return EOF + n, err = retryReader.Read(can) + require.Equal(t, n, 0) + require.Equal(t, err, io.EOF) +} + +func TestRetryReaderReadNegativeNonRetriableError(t *testing.T) { + byteCount := 1 + body := newPerByteReader(byteCount) + body.doInjectError = true + body.doInjectErrorByteIndex = 0 + body.doInjectTimes = 1 + body.injectedError = fmt.Errorf("not retriable error") + + startResponseBody := body + + getter := func(ctx context.Context, info httpGetterInfo) (io.ReadCloser, error) { + r := http.Response{} + body.currentByteIndex = int(info.Range.Offset) + r.Body = body + + return r.Body, nil + } + + httpGetterInfo := httpGetterInfo{ + Range: &HTTPRange{ + Count: int64(byteCount), + }, + } + + retryReader := newRetryReader(context.Background(), startResponseBody, httpGetterInfo, getter, RetryReaderOptions{MaxRetries: 2}) + + dest := make([]byte, 1) + _, err := retryReader.Read(dest) + require.Equal(t, err, body.injectedError) +} + +// Test the case where we programmatically force a 
retry to happen, via closing the body early from another goroutine +// Unlike the retries orchestrated elsewhere in this test file, which simulate network failures for the +// purposes of unit testing, here we are testing the cancellation mechanism that is exposed to +// consumers of the API, to allow programmatic forcing of retries (e.g. if the consumer deems +// the read to be taking too long, they may force a retry in the hope of better performance next time). +func TestRetryReaderReadWithForcedRetry(t *testing.T) { + for _, enableRetryOnEarlyClose := range []bool{false, true} { + + // use the notification callback, so we know that the retry really did happen + failureMethodNumCalls := 0 + failureMethod := func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) { + failureMethodNumCalls++ + } + + // Main test setup + byteCount := 10 // so multiple passes through read loop will be required + sleepDuration := 100 * time.Millisecond + randBytes := make([]byte, byteCount) + _, _ = rand.Read(randBytes) + getter := func(ctx context.Context, info httpGetterInfo) (io.ReadCloser, error) { + body := newSingleUsePerByteReader(randBytes) // make new one every time, since we force closes in this test, and it is unusable after a close + body.sleepDuration = sleepDuration + r := http.Response{} + body.currentByteIndex = int(info.Range.Offset) + r.Body = body + + return r.Body, nil + } + + httpGetterInfo := httpGetterInfo{ + Range: &HTTPRange{ + Count: int64(byteCount), + }, + } + initResponse, err := getter(context.Background(), httpGetterInfo) + require.NoError(t, err) + + rrOptions := RetryReaderOptions{MaxRetries: 2, EarlyCloseAsError: !enableRetryOnEarlyClose} + rrOptions.OnFailedRead = failureMethod + retryReader := newRetryReader(context.Background(), initResponse, httpGetterInfo, getter, rrOptions) + + // set up timed cancellation from separate goroutine + go func() { + time.Sleep(sleepDuration * 5) + err := retryReader.Close() + if err != nil { + 
return + } + }() + + // do the read (should fail, due to forced cancellation, and succeed through retry) + output := make([]byte, byteCount) + n, err := io.ReadFull(retryReader, output) + if enableRetryOnEarlyClose { + require.Equal(t, n, byteCount) + require.NoError(t, err) + require.EqualValues(t, output, randBytes) + require.Equal(t, failureMethodNumCalls, 1) // assert that the cancellation did indeed happen + } else { + require.Error(t, err) + } + } +} diff --git a/sdk/storage/azdatalake/filesystem/client.go b/sdk/storage/azdatalake/filesystem/client.go new file mode 100644 index 000000000000..40dc0768aaad --- /dev/null +++ b/sdk/storage/azdatalake/filesystem/client.go @@ -0,0 +1,358 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +// FOR FS CLIENT WE STORE THE GENERATED DATALAKE LAYER WITH BLOB ENDPOINT IN ORDER TO USE DELETED PATH LISTING + +package filesystem + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "net/http" + "strings" + "time" +) + +// ClientOptions 
contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure Datalake Storage service. +type Client base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client] + +// NewClient creates an instance of Client with the specified values. +// - filesystemURL - the URL of the blob e.g. https://.dfs.core.windows.net/fs +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(filesystemURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + containerURL, filesystemURL := shared.GetURLs(filesystemURL) + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FileSystemClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + containerClientOpts := container.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobContainerClient, _ := container.NewClient(containerURL, cred, &containerClientOpts) + fsClient := base.NewFileSystemClient(filesystemURL, containerURL, blobContainerClient, azClient, nil, &cred, (*base.ClientOptions)(conOptions)) + + return (*Client)(fsClient), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - filesystemURL - the URL of the storage account e.g. https://.dfs.core.windows.net/fs? 
+// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(filesystemURL string, options *ClientOptions) (*Client, error) { + containerURL, filesystemURL := shared.GetURLs(filesystemURL) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{} + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FileSystemClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + containerClientOpts := container.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobContainerClient, _ := container.NewClientWithNoCredential(containerURL, &containerClientOpts) + fsClient := base.NewFileSystemClient(filesystemURL, containerURL, blobContainerClient, azClient, nil, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(fsClient), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - filesystemURL - the URL of the storage account e.g. 
https://.dfs.core.windows.net/fs +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(filesystemURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + containerURL, filesystemURL := shared.GetURLs(filesystemURL) + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FileSystemClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + containerClientOpts := container.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobSharedKey, err := exported.ConvertToBlobSharedKey(cred) + if err != nil { + return nil, err + } + blobContainerClient, _ := container.NewClientWithSharedKeyCredential(containerURL, blobSharedKey, &containerClientOpts) + fsClient := base.NewFileSystemClient(filesystemURL, containerURL, blobContainerClient, azClient, cred, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(fsClient), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, fsName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, fsName) + if err != nil { + return nil, err + } + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (fs *Client) generatedFSClientWithDFS() *generated.FileSystemClient { + fsClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(fs)) + return fsClientWithDFS +} + +func (fs *Client) generatedFSClientWithBlob() *generated.FileSystemClient { + _, fsClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(fs)) + return fsClientWithBlob +} + +func (fs *Client) getClientOptions() *base.ClientOptions { + return base.GetCompositeClientOptions((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(fs)) +} + +func (fs *Client) containerClient() *container.Client { + _, _, containerClient := base.InnerClients((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(fs)) + return containerClient +} + +func (fs *Client) identityCredential() *azcore.TokenCredential { + return base.IdentityCredentialComposite((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(fs)) +} + +func (fs *Client) sharedKey() 
*exported.SharedKeyCredential {
	return base.SharedKeyComposite((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(fs))
}

// DFSURL returns the URL endpoint used by the Client object.
func (fs *Client) DFSURL() string {
	return fs.generatedFSClientWithDFS().Endpoint()
}

// BlobURL returns the URL endpoint used by the Client object.
func (fs *Client) BlobURL() string {
	return fs.generatedFSClientWithBlob().Endpoint()
}

// NewDirectoryClient creates a new directory.Client object by concatenating directoryPath to the end of this Client's URL.
// The new directory.Client uses the same request policy pipeline as the Client.
func (fs *Client) NewDirectoryClient(directoryPath string) *directory.Client {
	// Normalize Windows-style separators before building the URL.
	directoryPath = strings.ReplaceAll(directoryPath, "\\", "/")
	joinedURL := runtime.JoinPaths(fs.generatedFSClientWithDFS().Endpoint(), directoryPath)
	dirBlobURL, dirDFSURL := shared.GetURLs(joinedURL)
	pathClient := base.NewPathClient(dirDFSURL, dirBlobURL, fs.containerClient().NewBlockBlobClient(directoryPath), fs.generatedFSClientWithDFS().InternalClient().WithClientName(shared.DirectoryClient), fs.sharedKey(), fs.identityCredential(), fs.getClientOptions())
	return (*directory.Client)(pathClient)
}

// NewFileClient creates a new file.Client object by concatenating file path to the end of this Client's URL.
// The new file.Client uses the same request policy pipeline as the Client.
+func (fs *Client) NewFileClient(filePath string) *file.Client { + filePath = strings.ReplaceAll(filePath, "\\", "/") + fileURL := runtime.JoinPaths(fs.generatedFSClientWithDFS().Endpoint(), filePath) + blobURL, fileURL := shared.GetURLs(fileURL) + return (*file.Client)(base.NewPathClient(fileURL, blobURL, fs.containerClient().NewBlockBlobClient(filePath), fs.generatedFSClientWithDFS().InternalClient().WithClientName(shared.FileClient), fs.sharedKey(), fs.identityCredential(), fs.getClientOptions())) +} + +// Create creates a new filesystem under the specified account. +func (fs *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + opts := options.format() + resp, err := fs.containerClient().Create(ctx, opts) + err = exported.ConvertToDFSError(err) + return resp, err +} + +// Delete deletes the specified filesystem and any files or directories it contains. +func (fs *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts := options.format() + resp, err := fs.containerClient().Delete(ctx, opts) + err = exported.ConvertToDFSError(err) + return resp, err +} + +// GetProperties returns all user-defined metadata, standard HTTP properties, and system properties for the filesystem. +func (fs *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts := options.format() + newResp := GetPropertiesResponse{} + resp, err := fs.containerClient().GetProperties(ctx, opts) + formatFileSystemProperties(&newResp, &resp) + err = exported.ConvertToDFSError(err) + return newResp, err +} + +// SetMetadata sets one or more user-defined name-value pairs for the specified filesystem. 
+func (fs *Client) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) { + opts := options.format() + resp, err := fs.containerClient().SetMetadata(ctx, opts) + err = exported.ConvertToDFSError(err) + return resp, err +} + +// SetAccessPolicy sets the permissions for the specified filesystem or the files and directories under it. +func (fs *Client) SetAccessPolicy(ctx context.Context, options *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { + opts := options.format() + resp, err := fs.containerClient().SetAccessPolicy(ctx, opts) + err = exported.ConvertToDFSError(err) + return resp, err +} + +// GetAccessPolicy returns the permissions for the specified filesystem or the files and directories under it. +func (fs *Client) GetAccessPolicy(ctx context.Context, options *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) { + opts := options.format() + newResp := GetAccessPolicyResponse{} + resp, err := fs.containerClient().GetAccessPolicy(ctx, opts) + formatGetAccessPolicyResponse(&newResp, &resp) + err = exported.ConvertToDFSError(err) + return newResp, err +} + +// TODO: implement undelete path in fs client as well + +// NewListPathsPager operation returns a pager of the shares under the specified account. 
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-shares +func (fs *Client) NewListPathsPager(recursive bool, options *ListPathsOptions) *runtime.Pager[ListPathsSegmentResponse] { + listOptions := options.format() + return runtime.NewPager(runtime.PagingHandler[ListPathsSegmentResponse]{ + More: func(page ListPathsSegmentResponse) bool { + return page.Continuation != nil && len(*page.Continuation) > 0 + }, + Fetcher: func(ctx context.Context, page *ListPathsSegmentResponse) (ListPathsSegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = fs.generatedFSClientWithDFS().ListPathsCreateRequest(ctx, recursive, &listOptions) + err = exported.ConvertToDFSError(err) + } else { + listOptions.Continuation = page.Continuation + req, err = fs.generatedFSClientWithDFS().ListPathsCreateRequest(ctx, recursive, &listOptions) + err = exported.ConvertToDFSError(err) + } + if err != nil { + return ListPathsSegmentResponse{}, err + } + resp, err := fs.generatedFSClientWithDFS().InternalClient().Pipeline().Do(req) + err = exported.ConvertToDFSError(err) + if err != nil { + return ListPathsSegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListPathsSegmentResponse{}, runtime.NewResponseError(resp) + } + newResp, err := fs.generatedFSClientWithDFS().ListPathsHandleResponse(resp) + return newResp, exported.ConvertToDFSError(err) + }, + }) +} + +// NewListDeletedPathsPager operation returns a pager of the shares under the specified account. 
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-shares +func (fs *Client) NewListDeletedPathsPager(options *ListDeletedPathsOptions) *runtime.Pager[ListDeletedPathsSegmentResponse] { + listOptions := options.format() + return runtime.NewPager(runtime.PagingHandler[ListDeletedPathsSegmentResponse]{ + More: func(page ListDeletedPathsSegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListDeletedPathsSegmentResponse) (ListDeletedPathsSegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = fs.generatedFSClientWithBlob().ListBlobHierarchySegmentCreateRequest(ctx, &listOptions) + err = exported.ConvertToDFSError(err) + } else { + listOptions.Marker = page.NextMarker + req, err = fs.generatedFSClientWithBlob().ListBlobHierarchySegmentCreateRequest(ctx, &listOptions) + err = exported.ConvertToDFSError(err) + } + if err != nil { + return ListDeletedPathsSegmentResponse{}, err + } + resp, err := fs.generatedFSClientWithBlob().InternalClient().Pipeline().Do(req) + err = exported.ConvertToDFSError(err) + if err != nil { + return ListDeletedPathsSegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListDeletedPathsSegmentResponse{}, runtime.NewResponseError(resp) + } + newResp, err := fs.generatedFSClientWithBlob().ListBlobHierarchySegmentHandleResponse(resp) + return newResp, exported.ConvertToDFSError(err) + }, + }) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at filesystem. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (fs *Client) GetSASURL(permissions sas.FileSystemPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if fs.sharedKey() == nil { + return "", datalakeerror.MissingSharedKeyCredential + } + st := o.format() + urlParts, err := azdatalake.ParseURL(fs.BlobURL()) + err = exported.ConvertToDFSError(err) + if err != nil { + return "", err + } + qps, err := sas.DatalakeSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + FileSystemName: urlParts.FileSystemName, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(fs.sharedKey()) + err = exported.ConvertToDFSError(err) + if err != nil { + return "", err + } + + endpoint := fs.BlobURL() + "?" + qps.Encode() + + return endpoint, nil +} diff --git a/sdk/storage/azdatalake/filesystem/client_test.go b/sdk/storage/azdatalake/filesystem/client_test.go new file mode 100644 index 000000000000..571bbce8055e --- /dev/null +++ b/sdk/storage/azdatalake/filesystem/client_test.go @@ -0,0 +1,1811 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package filesystem_test + +import ( + "context" + "strconv" + "strings" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +var proposedLeaseIDs = []*string{to.Ptr("c820a799-76d7-4ee2-6e15-546f19325c2c"), to.Ptr("326cc5e1-746e-4af8-4811-a50e6629a8ca")} + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running datalake Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &RecordedTestSuite{}) + suite.Run(t, &UnrecordedTestSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &RecordedTestSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &RecordedTestSuite{}) + } +} + +func (s *RecordedTestSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *RecordedTestSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *UnrecordedTestSuite) BeforeTest(suite string, test string) { + +} + +func (s *UnrecordedTestSuite) AfterTest(suite string, test string) { + +} + +type RecordedTestSuite struct { + suite.Suite +} + +type UnrecordedTestSuite struct { + suite.Suite +} + +func validateFileSystemDeleted(_require *require.Assertions, filesystemClient *filesystem.Client) { + _, err := filesystemClient.GetAccessPolicy(context.Background(), nil) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.FileSystemNotFound) +} + 
+func (s *RecordedTestSuite) TestCreateFilesystem() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestCreateFilesystemWithOptions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + testStr := "hello" + metadata := map[string]*string{"foo": &testStr, "bar": &testStr} + access := filesystem.FileSystem + opts := filesystem.CreateOptions{ + Metadata: metadata, + Access: &access, + } + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), &opts) + _require.Nil(err) + + props, err := fsClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(props.Metadata) + _require.Equal(*props.PublicAccess, filesystem.FileSystem) +} + +func (s *RecordedTestSuite) TestCreateFilesystemWithFileAccess() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + testStr := "hello" + metadata := map[string]*string{"foo": &testStr, "bar": &testStr} + access := filesystem.File + opts := filesystem.CreateOptions{ + Metadata: metadata, + Access: &access, + } + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = 
fsClient.Create(context.Background(), &opts) + _require.Nil(err) + props, err := fsClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(props.Metadata) + _require.Equal(*props.PublicAccess, filesystem.File) +} + +func (s *RecordedTestSuite) TestCreateFilesystemEmptyMetadata() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + metadata := map[string]*string{"foo": nil, "bar": nil} + access := filesystem.FileSystem + opts := filesystem.CreateOptions{ + Metadata: metadata, + Access: &access, + } + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), &opts) + _require.Nil(err) + + props, err := fsClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Nil(props.Metadata) + _require.Equal(*props.PublicAccess, filesystem.FileSystem) + +} + +func (s *RecordedTestSuite) TestFilesystemCreateInvalidName() { + _require := require.New(s.T()) + + fsClient, err := testcommon.GetFileSystemClient("foo bar", s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.InvalidResourceName) +} + +func (s *RecordedTestSuite) TestFilesystemCreateNameCollision() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + _, err = 
fsClient.Create(context.Background(), nil) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.FileSystemAlreadyExists) +} + +func (s *RecordedTestSuite) TestFilesystemGetProperties() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + resp, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp.ETag) + _require.Nil(resp.Metadata) +} + +func (s *RecordedTestSuite) TestFilesystemGetPropertiesWithEmptyOpts() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + opts := &filesystem.GetPropertiesOptions{} + resp, err := fsClient.GetProperties(context.Background(), opts) + _require.Nil(err) + _require.NotNil(resp.ETag) + _require.Nil(resp.Metadata) +} + +func (s *RecordedTestSuite) TestFilesystemGetPropertiesWithLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + fsLeaseClient, err := 
lease.NewFileSystemClient(fsClient, &lease.FileSystemClientOptions{LeaseID: proposedLeaseIDs[0]}) + _require.Nil(err) + _, err = fsLeaseClient.AcquireLease(context.Background(), int32(60), nil) + _require.Nil(err) + + opts := &filesystem.GetPropertiesOptions{LeaseAccessConditions: &filesystem.LeaseAccessConditions{ + LeaseID: fsLeaseClient.LeaseID(), + }} + + resp, err := fsClient.GetProperties(context.Background(), opts) + _require.Nil(err) + _require.NotNil(resp.ETag) + _require.Nil(resp.Metadata) + + _, err = fsLeaseClient.ReleaseLease(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemDelete() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + _, err = fsClient.Delete(context.Background(), nil) + _require.Nil(err) + + validateFileSystemDeleted(_require, fsClient) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteNonExistent() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Delete(context.Background(), nil) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.FileSystemNotFound) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), 
nil) + _require.Nil(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + deleteFileSystemOptions := filesystem.DeleteOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + _, err = fsClient.Delete(context.Background(), &deleteFileSystemOptions) + _require.Nil(err) + validateFileSystemDeleted(_require, fsClient) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + deleteFileSystemOptions := filesystem.DeleteOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + _, err = fsClient.Delete(context.Background(), &deleteFileSystemOptions) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteIfUnModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + deleteFileSystemOptions := filesystem.DeleteOptions{ + AccessConditions: 
&filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + _, err = fsClient.Delete(context.Background(), &deleteFileSystemOptions) + _require.Nil(err) + + validateFileSystemDeleted(_require, fsClient) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteIfUnModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + deleteFileSystemOptions := filesystem.DeleteOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + _, err = fsClient.Delete(context.Background(), &deleteFileSystemOptions) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataNonEmpty() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + opts := filesystem.SetMetadataOptions{ + Metadata: testcommon.BasicMetadata, + } + _, err = fsClient.SetMetadata(context.Background(), &opts) + _require.Nil(err) + + resp1, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) + + for k, 
v := range testcommon.BasicMetadata { + _require.Equal(v, resp1.Metadata[k]) + } +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataEmpty() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + opts := filesystem.SetMetadataOptions{ + Metadata: map[string]*string{}, + } + + _, err = fsClient.SetMetadata(context.Background(), &opts) + _require.Nil(err) + + resp1, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.Metadata) +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataNil() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.SetMetadata(context.Background(), nil) + _require.Nil(err) + + resp1, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.Metadata) +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataInvalidField() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), 
_require, fsClient) + + opts := filesystem.SetMetadataOptions{ + Metadata: map[string]*string{"!nval!d Field!@#%": to.Ptr("value")}, + } + _, err = fsClient.SetMetadata(context.Background(), &opts) + _require.NotNil(err) + _require.Equal(strings.Contains(err.Error(), testcommon.InvalidHeaderErrorSubstring), true) +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataNonExistent() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.SetMetadata(context.Background(), nil) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.FileSystemNotFound) +} + +func (s *RecordedTestSuite) TestFilesystemSetEmptyAccessPolicy() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.SetAccessPolicy(context.Background(), &filesystem.SetAccessPolicyOptions{}) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemSetNilAccessPolicy() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.SetAccessPolicy(context.Background(), nil) + _require.Nil(err) +} + 
+func (s *RecordedTestSuite) TestFilesystemSetAccessPolicy() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + start := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + expiration := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + permission := "r" + id := "1" + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + AccessPolicy: &filesystem.AccessPolicy{ + Expiry: &expiration, + Start: &start, + Permission: &permission, + }, + ID: &id, + }) + options := filesystem.SetAccessPolicyOptions{FileSystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemSetMultipleAccessPolicies() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + id := "empty" + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id, + }) + + permission2 := "r" + id2 := "partial" + + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id2, + AccessPolicy: &filesystem.AccessPolicy{ + Permission: &permission2, + }, + }) + + id3 := "full" + permission3 := "r" + start 
:= time.Date(2021, 6, 8, 2, 10, 9, 0, time.UTC) + expiry := time.Date(2021, 6, 8, 2, 10, 9, 0, time.UTC) + + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id3, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &permission3, + }, + }) + options := filesystem.SetAccessPolicyOptions{FileSystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + // Make a Get to assert two access policies + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp.SignedIdentifiers, 3) +} + +func (s *RecordedTestSuite) TestFilesystemSetNullAccessPolicy() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + id := "null" + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id, + }) + options := filesystem.SetAccessPolicyOptions{FileSystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Equal(len(resp.SignedIdentifiers), 1) +} + +func (s *RecordedTestSuite) TestFilesystemGetAccessPolicyWithEmptyOpts() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = 
fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + id := "null" + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id, + }) + options := filesystem.SetAccessPolicyOptions{FileSystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + opts := &filesystem.GetAccessPolicyOptions{} + + resp, err := fsClient.GetAccessPolicy(context.Background(), opts) + _require.Nil(err) + _require.Equal(len(resp.SignedIdentifiers), 1) +} + +func (s *RecordedTestSuite) TestFilesystemGetAccessPolicyOnLeasedFilesystem() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + var proposedLeaseIDs = []*string{to.Ptr("c820a799-76d7-4ee2-6e15-546f19325c2c"), to.Ptr("326cc5e1-746e-4af8-4811-a50e6629a8ca")} + + fsLeaseClient, err := lease.NewFileSystemClient(fsClient, &lease.FileSystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.Nil(err) + + id := "null" + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id, + }) + options := filesystem.SetAccessPolicyOptions{FileSystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + _, err = fsLeaseClient.AcquireLease(context.Background(), int32(15), nil) + _require.Nil(err) + + opts1 := &filesystem.GetAccessPolicyOptions{ + LeaseAccessConditions: &filesystem.LeaseAccessConditions{ + LeaseID: proposedLeaseIDs[0], + }, + } + + _, err = 
fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), opts1) + _require.Nil(err) + _require.Equal(len(resp.SignedIdentifiers), 1) + + _, err = fsClient.Delete(context.Background(), &filesystem.DeleteOptions{AccessConditions: &filesystem.AccessConditions{ + LeaseAccessConditions: &filesystem.LeaseAccessConditions{ + LeaseID: proposedLeaseIDs[0], + }, + }}) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemGetSetPermissionsMultiplePolicies() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + // Define the policies + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + expiry := start.Add(5 * time.Minute) + expiry2 := start.Add(time.Minute) + readWrite := to.Ptr(filesystem.AccessPolicyPermission{Read: true, Write: true}).String() + readOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + id1, id2 := "0000", "0001" + permissions := []*filesystem.SignedIdentifier{ + {ID: &id1, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &readWrite, + }, + }, + {ID: &id2, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry2, + Permission: &readOnly, + }, + }, + } + options := filesystem.SetAccessPolicyOptions{FileSystemACL: permissions} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(resp.SignedIdentifiers, permissions) +} + +func (s 
*RecordedTestSuite) TestFilesystemGetPermissionsPublicAccessNotNone() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + access := filesystem.File + createContainerOptions := filesystem.CreateOptions{ + Access: &access, + } + _, err = fsClient.Create(context.Background(), &createContainerOptions) // We create the container explicitly so we can be sure the access policy is not empty + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + + _require.Nil(err) + _require.Equal(*resp.PublicAccess, filesystem.File) +} + +// TODO: TestFilesystemSetPermissionsPublicAccessNone() + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsPublicAccessTypeFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Equal(*resp.PublicAccess, filesystem.File) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsPublicAccessFilesystem() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := 
testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.FileSystem), + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Equal(*resp.PublicAccess, filesystem.FileSystem) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsACLMoreThanFive() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2049") + _require.Nil(err) + permissions := make([]*filesystem.SignedIdentifier, 6) + listOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + for i := 0; i < 6; i++ { + id := "000" + strconv.Itoa(i) + permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + access := filesystem.File + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: &access, + } + setAccessPolicyOptions.FileSystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.NotNil(err) + + 
testcommon.ValidateErrorCode(_require, err, datalakeerror.InvalidXMLDocument) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsDeleteAndModifyACL() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2049") + _require.Nil(err) + listOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + permissions := make([]*filesystem.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + id := "000" + strconv.Itoa(i) + permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + access := filesystem.File + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: &access, + } + setAccessPolicyOptions.FileSystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(resp.SignedIdentifiers, permissions) + + permissions = resp.SignedIdentifiers[:1] // Delete the first policy by removing it from the slice + newId := "0004" + permissions[0].ID = &newId // Modify the remaining policy which is at index 0 in the new slice + setAccessPolicyOptions1 := filesystem.SetAccessPolicyOptions{ + Access: &access, + } + setAccessPolicyOptions1.FileSystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions1) 
+ _require.Nil(err) + + resp, err = fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp.SignedIdentifiers, 1) + _require.EqualValues(resp.SignedIdentifiers, permissions) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsDeleteAllPolicies() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2049") + _require.Nil(err) + permissions := make([]*filesystem.SignedIdentifier, 2) + listOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + for i := 0; i < 2; i++ { + id := "000" + strconv.Itoa(i) + permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + setAccessPolicyOptions.FileSystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp.SignedIdentifiers, len(permissions)) + _require.EqualValues(resp.SignedIdentifiers, permissions) + + setAccessPolicyOptions = filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + setAccessPolicyOptions.FileSystemACL = []*filesystem.SignedIdentifier{} + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + 
_require.Nil(err) + + resp, err = fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp.SignedIdentifiers) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsInvalidPolicyTimes() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + // Swap start and expiry + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2049") + _require.Nil(err) + permissions := make([]*filesystem.SignedIdentifier, 2) + listOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + for i := 0; i < 2; i++ { + id := "000" + strconv.Itoa(i) + permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + setAccessPolicyOptions.FileSystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsNilPolicySlice() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = 
fsClient.SetAccessPolicy(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsSignedIdentifierTooLong() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + id := "" + for i := 0; i < 65; i++ { + id += "a" + } + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + start := expiry.Add(5 * time.Minute).UTC() + permissions := make([]*filesystem.SignedIdentifier, 2) + listOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + for i := 0; i < 2; i++ { + permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + setAccessPolicyOptions.FileSystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.InvalidXMLDocument) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + currentTime := 
testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{IfModifiedSince: &currentTime}, + }, + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp1, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.PublicAccess) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{IfModifiedSince: &currentTime}, + }, + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsIfUnModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + currentTime := 
testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{IfUnmodifiedSince: &currentTime}, + }, + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp1, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.PublicAccess) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsIfUnModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{IfUnmodifiedSince: &currentTime}, + }, + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *UnrecordedTestSuite) TestFilesystemSetAccessPoliciesInDifferentTimeFormats() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + id := 
"timeInEST" + permission := "rw" + loc, err := time.LoadLocation("EST") + _require.Nil(err) + start := time.Now().In(loc) + expiry := start.Add(10 * time.Hour) + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &permission, + }, + }) + + id2 := "timeInIST" + permission2 := "r" + loc2, err := time.LoadLocation("Asia/Kolkata") + _require.Nil(err) + start2 := time.Now().In(loc2) + expiry2 := start2.Add(5 * time.Hour) + + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id2, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start2, + Expiry: &expiry2, + Permission: &permission2, + }, + }) + + id3 := "nilTime" + permission3 := "r" + + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id3, + AccessPolicy: &filesystem.AccessPolicy{ + Permission: &permission3, + }, + }) + options := filesystem.SetAccessPolicyOptions{FileSystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + // make a Get to assert three access policies + resp1, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp1.SignedIdentifiers, 3) + _require.EqualValues(resp1.SignedIdentifiers, signedIdentifiers) +} + +func (s *RecordedTestSuite) TestFilesystemSetAccessPolicyWithNullId() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + signedIdentifiers := 
make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + AccessPolicy: &filesystem.AccessPolicy{ + Permission: to.Ptr("rw"), + }, + }) + + options := filesystem.SetAccessPolicyOptions{FileSystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.InvalidXMLDocument) + + resp1, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp1.SignedIdentifiers, 0) +} + +func (s *UnrecordedTestSuite) TestSASFileSystemClient() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + // Adding SAS and options + permissions := sas.FileSystemPermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Delete: true, + } + expiry := time.Now().Add(time.Hour) + + // filesystemSASURL is created with GetSASURL + sasUrl, err := fsClient.GetSASURL(permissions, expiry, nil) + _require.Nil(err) + + // Create filesystem client with sasUrl + _, err = filesystem.NewClientWithNoCredential(sasUrl, nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemListPathsWithRecursive() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) 
+ + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + + pager := fsClient.NewListPathsPager(true, nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(5, len(resp.Paths)) + _require.NotNil(resp.PathList.Paths[0].IsDirectory) + + if err != nil { + break + } + } +} + +func (s *RecordedTestSuite) TestFilesystemListPathsWithRecursiveNoPrefix() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + client := fsClient.NewFileClient("file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient("file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient("dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient("dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + + pager := fsClient.NewListPathsPager(true, nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(4, len(resp.Paths)) + 
_require.NotNil(resp.PathList.Paths[0].IsDirectory) + if err != nil { + break + } + } +} + +func (s *RecordedTestSuite) TestFilesystemListPathsWithoutRecursive() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + + pager := fsClient.NewListPathsPager(false, nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(1, len(resp.Paths)) + if err != nil { + break + } + } +} + +func (s *RecordedTestSuite) TestFilesystemListPathsWithMaxResults() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, 
err = client.Create(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + + opts := filesystem.ListPathsOptions{ + MaxResults: to.Ptr(int32(2)), + } + pages := 3 + count := 0 + pager := fsClient.NewListPathsPager(true, &opts) + for pager.More() { + _, err = pager.NextPage(context.Background()) + _require.Nil(err) + count += 1 + if err != nil { + break + } + } + _require.Equal(pages, count) +} + +func (s *RecordedTestSuite) TestFilesystemListPathsWithPrefix() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + + opts := filesystem.ListPathsOptions{ + Prefix: to.Ptr("Test"), + } + pager := fsClient.NewListPathsPager(true, &opts) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(4, len(resp.Paths)) + if err != nil { + break + } + } +} + +func (s *RecordedTestSuite) 
TestFilesystemListPathsWithContinuation() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + + opts := filesystem.ListPathsOptions{ + MaxResults: to.Ptr(int32(3)), + } + pager := fsClient.NewListPathsPager(true, &opts) + + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(3, len(resp.Paths)) + _require.NotNil(resp.Continuation) + + token := resp.Continuation + pager = fsClient.NewListPathsPager(true, &filesystem.ListPathsOptions{ + Marker: token, + }) + resp, err = pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(2, len(resp.Paths)) + _require.Nil(resp.Continuation) +} + +func (s *RecordedTestSuite) TestFilesystemListDeletedPaths() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + 
_require.Nil(err) + + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + _, err = dirClient.Delete(context.Background(), nil) + _require.Nil(err) + + pager := fsClient.NewListDeletedPathsPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(1, len(resp.ListPathsHierarchySegmentResponse.Segment.PathItems)) + if err != nil { + break + } + } +} + +func (s *RecordedTestSuite) TestFilesystemListDeletedPathsWithMaxResults() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + _, err = client.Delete(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + _, err = client.Delete(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + _, err = dirClient.Delete(context.Background(), nil) + _require.Nil(err) + 
dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + _, err = dirClient.Delete(context.Background(), nil) + _require.Nil(err) + + opts := filesystem.ListDeletedPathsOptions{ + MaxResults: to.Ptr(int32(2)), + } + pages := 2 + count := 0 + pager := fsClient.NewListDeletedPathsPager(&opts) + for pager.More() { + _, err = pager.NextPage(context.Background()) + _require.Nil(err) + count += 1 + if err != nil { + break + } + } + _require.Equal(pages, count) +} + +func (s *RecordedTestSuite) TestFilesystemListDeletedPathsWithPrefix() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + _, err = client.Delete(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + _, err = client.Delete(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + _, err = dirClient.Delete(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + _, err = dirClient.Delete(context.Background(), nil) + _require.Nil(err) + + opts := filesystem.ListDeletedPathsOptions{ + Prefix: to.Ptr("Test"), + } + pager := fsClient.NewListDeletedPathsPager(&opts) + for 
pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(4, len(resp.ListPathsHierarchySegmentResponse.Segment.PathItems)) + if err != nil { + break + } + } +} + +func (s *RecordedTestSuite) TestFilesystemListDeletedPathsWithContinuation() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + client := fsClient.NewFileClient(testName + "file1") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + _, err = client.Delete(context.Background(), nil) + _require.Nil(err) + client = fsClient.NewFileClient(testName + "file2") + _, err = client.Create(context.Background(), nil) + _require.Nil(err) + _, err = client.Delete(context.Background(), nil) + _require.Nil(err) + dirClient := fsClient.NewDirectoryClient(testName + "dir1") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + _, err = dirClient.Delete(context.Background(), nil) + _require.Nil(err) + dirClient = fsClient.NewDirectoryClient(testName + "dir2") + _, err = dirClient.Create(context.Background(), nil) + _require.Nil(err) + _, err = dirClient.Delete(context.Background(), nil) + _require.Nil(err) + + opts := filesystem.ListDeletedPathsOptions{ + MaxResults: to.Ptr(int32(3)), + } + pager := fsClient.NewListDeletedPathsPager(&opts) + + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(3, len(resp.ListPathsHierarchySegmentResponse.Segment.PathItems)) + _require.NotNil(resp.NextMarker) + + token := resp.NextMarker + pager = fsClient.NewListDeletedPathsPager(&filesystem.ListDeletedPathsOptions{ + Marker: token, + }) + resp, err = 
pager.NextPage(context.Background()) + _require.Nil(err) + _require.Equal(1, len(resp.ListPathsHierarchySegmentResponse.Segment.PathItems)) + _require.Equal("", *resp.NextMarker) +} + +func (s *UnrecordedTestSuite) TestSASFileSystemCreateAndDeleteFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + // Adding SAS and options + permissions := sas.FileSystemPermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Delete: true, + } + expiry := time.Now().Add(time.Hour) + + // filesystemSASURL is created with GetSASURL + sasUrl, err := fsClient.GetSASURL(permissions, expiry, nil) + _require.Nil(err) + + // Create filesystem client with sasUrl + client, err := filesystem.NewClientWithNoCredential(sasUrl, nil) + _require.Nil(err) + + fClient := client.NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), nil) + _require.Nil(err) + + _, err = fClient.Delete(context.Background(), nil) + _require.Nil(err) +} + +func (s *UnrecordedTestSuite) TestSASFileSystemCreateAndDeleteDirectory() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFileSystemName(testName) + fsClient, err := testcommon.GetFileSystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + // Adding SAS and options + permissions := sas.FileSystemPermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Delete: true, + } + 
expiry := time.Now().Add(time.Hour) + + // filesystemSASURL is created with GetSASURL + sasUrl, err := fsClient.GetSASURL(permissions, expiry, nil) + _require.Nil(err) + + // Create filesystem client with sasUrl + client, err := filesystem.NewClientWithNoCredential(sasUrl, nil) + _require.Nil(err) + + dClient := client.NewDirectoryClient(testcommon.GenerateDirName(testName)) + _, err = dClient.Create(context.Background(), nil) + _require.Nil(err) + + _, err = dClient.Delete(context.Background(), nil) + _require.Nil(err) +} diff --git a/sdk/storage/azdatalake/filesystem/constants.go b/sdk/storage/azdatalake/filesystem/constants.go new file mode 100644 index 000000000000..037d182790e0 --- /dev/null +++ b/sdk/storage/azdatalake/filesystem/constants.go @@ -0,0 +1,57 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package filesystem + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" +) + +// PublicAccessType defines values for AccessType - private (default) or file or filesystem. +type PublicAccessType = azblob.PublicAccessType + +const ( + File PublicAccessType = azblob.PublicAccessTypeBlob + FileSystem PublicAccessType = azblob.PublicAccessTypeContainer +) + +// StatusType defines values for StatusType +type StatusType = azdatalake.StatusType + +const ( + StatusTypeLocked StatusType = azdatalake.StatusTypeLocked + StatusTypeUnlocked StatusType = azdatalake.StatusTypeUnlocked +) + +// PossibleStatusTypeValues returns the possible values for the StatusType const type. 
+func PossibleStatusTypeValues() []StatusType { + return azdatalake.PossibleStatusTypeValues() +} + +// DurationType defines values for DurationType +type DurationType = azdatalake.DurationType + +const ( + DurationTypeInfinite DurationType = azdatalake.DurationTypeInfinite + DurationTypeFixed DurationType = azdatalake.DurationTypeFixed +) + +// PossibleDurationTypeValues returns the possible values for the DurationType const type. +func PossibleDurationTypeValues() []DurationType { + return azdatalake.PossibleDurationTypeValues() +} + +// StateType defines values for StateType +type StateType = azdatalake.StateType + +const ( + StateTypeAvailable StateType = azdatalake.StateTypeAvailable + StateTypeLeased StateType = azdatalake.StateTypeLeased + StateTypeExpired StateType = azdatalake.StateTypeExpired + StateTypeBreaking StateType = azdatalake.StateTypeBreaking + StateTypeBroken StateType = azdatalake.StateTypeBroken +) diff --git a/sdk/storage/azdatalake/filesystem/examples_test.go b/sdk/storage/azdatalake/filesystem/examples_test.go new file mode 100644 index 000000000000..ad8e7b293c0f --- /dev/null +++ b/sdk/storage/azdatalake/filesystem/examples_test.go @@ -0,0 +1,330 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package filesystem_test + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "net/http" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +func Example_fs_NewClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + fmt.Println(fsClient.DFSURL()) +} + +func Example_fs_NewClientWithSharedKeyCredential() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + fsClient, err := filesystem.NewClientWithSharedKeyCredential(fsURL, cred, nil) + handleError(err) + fmt.Println(fsClient.DFSURL()) +} + +func Example_fs_NewClientWithNoCredential() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + sharedAccessSignature, ok := os.LookupEnv("AZURE_STORAGE_SHARED_ACCESS_SIGNATURE") + if !ok { + 
panic("AZURE_STORAGE_SHARED_ACCESS_SIGNATURE could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s?%s", accountName, fsName, sharedAccessSignature) + + fsClient, err := filesystem.NewClientWithNoCredential(fsURL, nil) + handleError(err) + fmt.Println(fsClient.DFSURL()) +} + +func Example_fs_NewClientFromConnectionString() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + fsName := "testfs" + fsClient, err := filesystem.NewClientFromConnectionString(connectionString, fsName, nil) + handleError(err) + fmt.Println(fsClient.DFSURL()) +} + +func Example_fs_ClientNewFileClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + + fileClient := fsClient.NewFileClient("test_File") + handleError(err) + fmt.Println(fileClient.DFSURL()) +} + +func Example_fs_ClientCreate() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + + fsCreateResponse, err := fsClient.Create(context.TODO(), &filesystem.CreateOptions{ + Metadata: map[string]*string{"Foo": to.Ptr("Bar")}, + }) + handleError(err) + fmt.Println(fsCreateResponse) +} + +func 
Example_fs_ClientDelete() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + + fsDeleteResponse, err := fsClient.Delete(context.TODO(), nil) + handleError(err) + fmt.Println(fsDeleteResponse) +} + +func Example_fs_ClientListPaths() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + + pager := fsClient.NewListPathsPager(true, nil) + + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + if err != nil { + log.Fatal(err) + } + for _, path := range resp.Paths { + fmt.Println(*path.Name) + } + } +} + +func Example_fs_ClientListDeletedPaths() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + + pager := fsClient.NewListDeletedPathsPager(nil) + + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + if err != nil { + log.Fatal(err) + } + for _, path := range resp.Segment.PathItems { + fmt.Println(*path.Name) + } + } +} + +func Example_fs_ClientGetSASURL() { + accountName, ok := 
os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + + permission := sas.FileSystemPermissions{Read: true} + start := time.Now() + expiry := start.AddDate(1, 0, 0) + options := filesystem.GetSASURLOptions{StartTime: &start} + sasURL, err := fsClient.GetSASURL(permission, expiry, &options) + handleError(err) + _ = sasURL +} + +// This example shows how to manipulate a fs's permissions. +func Example_fs_ClientSetAccessPolicy() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + + // Create the fs + _, err = fsClient.Create(context.TODO(), nil) + handleError(err) + + // Upload a simple File. 
+ fileClient := fsClient.NewFileClient("HelloWorld.txt") + handleError(err) + + err = fileClient.UploadStream(context.TODO(), streaming.NopCloser(strings.NewReader("Hello World!")), nil) + handleError(err) + + // Attempt to read the File + get, err := http.Get(fileClient.DFSURL()) + handleError(err) + if get.StatusCode == http.StatusNotFound { + // ChangeLease the File to be public access File + _, err := fsClient.SetAccessPolicy( + context.TODO(), + &filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + }, + ) + if err != nil { + log.Fatal(err) + } + + // Now, this works + get, err = http.Get(fileClient.DFSURL()) + if err != nil { + log.Fatal(err) + } + var text bytes.Buffer + _, err = text.ReadFrom(get.Body) + if err != nil { + return + } + defer func(Body io.ReadCloser) { + _ = Body.Close() + }(get.Body) + + fmt.Println("Public access File data: ", text.String()) + } +} + +func Example_fs_ClientSetMetadata() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + fsName := "testfs" + fsURL := fmt.Sprintf("https://%s.dfs.core.windows.net/%s", accountName, fsName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + fsClient, err := filesystem.NewClient(fsURL, cred, nil) + handleError(err) + + // Create a fs with some metadata, key names are converted to lowercase before being sent to the service. + // You should always use lowercase letters, especially when querying a map for a metadata key. 
+ creatingApp, err := os.Executable() + handleError(err) + _, err = fsClient.Create(context.TODO(), &filesystem.CreateOptions{Metadata: map[string]*string{"author": to.Ptr("azFile"), "app": to.Ptr(creatingApp)}}) + handleError(err) + + // Query the fs's metadata + fsGetPropertiesResponse, err := fsClient.GetProperties(context.TODO(), nil) + handleError(err) + + if fsGetPropertiesResponse.Metadata == nil { + log.Fatal("metadata is empty!") + } + + for k, v := range fsGetPropertiesResponse.Metadata { + fmt.Printf("%s=%s\n", k, *v) + } + + // Update the metadata and write it back to the fs + fsGetPropertiesResponse.Metadata["author"] = to.Ptr("Mohit") + _, err = fsClient.SetMetadata(context.TODO(), &filesystem.SetMetadataOptions{Metadata: fsGetPropertiesResponse.Metadata}) + handleError(err) +} diff --git a/sdk/storage/azdatalake/filesystem/models.go b/sdk/storage/azdatalake/filesystem/models.go new file mode 100644 index 000000000000..05c395fc20c3 --- /dev/null +++ b/sdk/storage/azdatalake/filesystem/models.go @@ -0,0 +1,252 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package filesystem + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "time" +) + +// SetAccessPolicyOptions provides set of configurations for FileSystem.SetAccessPolicy operation. +type SetAccessPolicyOptions struct { + // Access Specifies whether data in the filesystem may be accessed publicly and the level of access. + // If this header is not included in the request, filesystem data is private to the account owner. + Access *PublicAccessType + // AccessConditions identifies filesystem-specific access conditions which you optionally set. 
+ AccessConditions *AccessConditions + // FileSystemACL sets the access policy for the filesystem. + FileSystemACL []*SignedIdentifier +} + +func (o *SetAccessPolicyOptions) format() *container.SetAccessPolicyOptions { + if o == nil { + return nil + } + return &container.SetAccessPolicyOptions{ + Access: o.Access, + AccessConditions: exported.FormatContainerAccessConditions(o.AccessConditions), + ContainerACL: o.FileSystemACL, + } +} + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // Access specifies whether data in the filesystem may be accessed publicly and the level of access. + Access *PublicAccessType + // Metadata specifies a user-defined name-value pair associated with the filesystem. + Metadata map[string]*string + // CPKScopeInfo specifies the encryption scope settings to set on the filesystem. + CPKScopeInfo *CPKScopeInfo +} + +func (o *CreateOptions) format() *container.CreateOptions { + if o == nil { + return nil + } + return &container.CreateOptions{ + Access: o.Access, + Metadata: o.Metadata, + CPKScopeInfo: o.CPKScopeInfo, + } +} + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // AccessConditions identifies filesystem-specific access conditions which you optionally set. + AccessConditions *AccessConditions +} + +func (o *DeleteOptions) format() *container.DeleteOptions { + if o == nil { + return nil + } + return &container.DeleteOptions{ + AccessConditions: exported.FormatContainerAccessConditions(o.AccessConditions), + } +} + +// GetPropertiesOptions contains the optional parameters for the FileSystemClient.GetProperties method. +type GetPropertiesOptions struct { + // LeaseAccessConditions contains parameters to access leased filesystem. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetPropertiesOptions) format() *container.GetPropertiesOptions { + if o == nil { + return nil + } + if o.LeaseAccessConditions == nil { + o.LeaseAccessConditions = &LeaseAccessConditions{} + } + return &container.GetPropertiesOptions{ + LeaseAccessConditions: &container.LeaseAccessConditions{ + LeaseID: o.LeaseAccessConditions.LeaseID, + }, + } +} + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + // Metadata sets the metadata key-value pairs to set on the filesystem. + Metadata map[string]*string + // AccessConditions identifies filesystem-specific access conditions which you optionally set. + AccessConditions *AccessConditions +} + +func (o *SetMetadataOptions) format() *container.SetMetadataOptions { + if o == nil { + return nil + } + accConditions := exported.FormatContainerAccessConditions(o.AccessConditions) + return &container.SetMetadataOptions{ + Metadata: o.Metadata, + LeaseAccessConditions: accConditions.LeaseAccessConditions, + ModifiedAccessConditions: accConditions.ModifiedAccessConditions, + } +} + +// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method. +type GetAccessPolicyOptions struct { + // LeaseAccessConditions contains parameters to access leased filesystem. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetAccessPolicyOptions) format() *container.GetAccessPolicyOptions { + if o == nil { + return nil + } + if o.LeaseAccessConditions == nil { + o.LeaseAccessConditions = &LeaseAccessConditions{} + } + return &container.GetAccessPolicyOptions{ + LeaseAccessConditions: &container.LeaseAccessConditions{ + LeaseID: o.LeaseAccessConditions.LeaseID, + }, + } +} + +// ListPathsOptions contains the optional parameters for the FileSystem.ListPaths operation. 
+type ListPathsOptions struct { + // Marker contains last continuation token returned from the service for listing. + Marker *string + // MaxResults sets the maximum number of paths that will be returned per page. + MaxResults *int32 + // Prefix filters the results to return only paths whose names begin with the specified prefix path. + Prefix *string + // UPN is the user principal name. + UPN *bool +} + +func (o *ListPathsOptions) format() generated.FileSystemClientListPathsOptions { + if o == nil { + return generated.FileSystemClientListPathsOptions{} + } + + return generated.FileSystemClientListPathsOptions{ + Continuation: o.Marker, + MaxResults: o.MaxResults, + Path: o.Prefix, + Upn: o.UPN, + } +} + +// ListDeletedPathsOptions contains the optional parameters for the FileSystem.ListDeletedPaths operation. +type ListDeletedPathsOptions struct { + // Marker contains last continuation token returned from the service for listing. + Marker *string + // MaxResults sets the maximum number of paths that will be returned per page. + MaxResults *int32 + // Prefix filters the results to return only paths whose names begin with the specified prefix path. + Prefix *string +} + +func (o *ListDeletedPathsOptions) format() generated.FileSystemClientListBlobHierarchySegmentOptions { + showOnly := "deleted" + if o == nil { + return generated.FileSystemClientListBlobHierarchySegmentOptions{Showonly: &showOnly} + } + return generated.FileSystemClientListBlobHierarchySegmentOptions{ + Marker: o.Marker, + MaxResults: o.MaxResults, + Prefix: o.Prefix, + Showonly: &showOnly, + } +} + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + // StartTime is the time after which the SAS will become valid. 
+ StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +//// UndeletePathOptions contains the optional parameters for the FileSystem.UndeletePath operation. +//type UndeletePathOptions struct { +// // placeholder +//} +// +//func (o *UndeletePathOptions) format() *UndeletePathOptions { +// if o == nil { +// return nil +// } +// return &UndeletePathOptions{} +//} + +// CPKScopeInfo contains a group of parameters for the FileSystemClient.Create method. +type CPKScopeInfo = container.CPKScopeInfo + +// AccessPolicy - An Access policy. +type AccessPolicy = container.AccessPolicy + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission = exported.AccessPolicyPermission + +// SignedIdentifier - signed identifier. +type SignedIdentifier = container.SignedIdentifier + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// AccessConditions identifies filesystem-specific access conditions which you optionally set. +type AccessConditions = exported.AccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. 
+type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// PathList contains the path list from the ListPaths operation +type PathList = generated.PathList + +// Path contains the path properties from the ListPaths operation +type Path = generated.Path + +// PathItem contains the response from method FileSystemClient.ListPathsHierarchySegment. +type PathItem = generated.PathItemInternal + +// PathProperties contains the response from method FileSystemClient.ListPathsHierarchySegment. +type PathProperties = generated.PathPropertiesInternal + +// PathPrefix contains the response from method FileSystemClient.ListPathsHierarchySegment. +type PathPrefix = generated.PathPrefix diff --git a/sdk/storage/azdatalake/filesystem/responses.go b/sdk/storage/azdatalake/filesystem/responses.go new file mode 100644 index 000000000000..15b97decc321 --- /dev/null +++ b/sdk/storage/azdatalake/filesystem/responses.go @@ -0,0 +1,152 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package filesystem + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "time" +) + +// GetAccessPolicyResponse contains the response from method FileSystemClient.GetAccessPolicy. +type GetAccessPolicyResponse struct { + // PublicAccess contains the information returned from the x-ms-blob-public-access header response. + PublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// since we want to remove the blob prefix in access type +func formatGetAccessPolicyResponse(r *GetAccessPolicyResponse, contResp *container.GetAccessPolicyResponse) { + r.PublicAccess = contResp.BlobPublicAccess + r.ClientRequestID = contResp.ClientRequestID + r.Date = contResp.Date + r.ETag = contResp.ETag + r.LastModified = contResp.LastModified + r.RequestID = contResp.RequestID + r.SignedIdentifiers = contResp.SignedIdentifiers + r.Version = contResp.Version +} + +// GetPropertiesResponse contains the response from method FileSystemClient.GetProperties. +type GetPropertiesResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + PublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. + DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. 
+ HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. + IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *DurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *StateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *StatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// removes the blob prefix in access type +func formatFileSystemProperties(r *GetPropertiesResponse, contResp *container.GetPropertiesResponse) { + r.PublicAccess = contResp.BlobPublicAccess + r.ClientRequestID = contResp.ClientRequestID + r.Date = contResp.Date + r.DefaultEncryptionScope = contResp.DefaultEncryptionScope + r.DenyEncryptionScopeOverride = contResp.DenyEncryptionScopeOverride + r.ETag = contResp.ETag + r.HasImmutabilityPolicy = contResp.HasImmutabilityPolicy + r.HasLegalHold = contResp.HasLegalHold + r.IsImmutableStorageWithVersioningEnabled = contResp.IsImmutableStorageWithVersioningEnabled + r.LastModified = contResp.LastModified + r.LeaseDuration = contResp.LeaseDuration + r.LeaseState = contResp.LeaseState + r.LeaseStatus = contResp.LeaseStatus + r.Metadata = contResp.Metadata + r.RequestID = contResp.RequestID + r.Version = contResp.Version +} + +// CreateResponse contains the response from method FileSystemClient.Create. +type CreateResponse = container.CreateResponse + +// DeleteResponse contains the response from method FileSystemClient.Delete. +type DeleteResponse = container.DeleteResponse + +// SetMetadataResponse contains the response from method FileSystemClient.SetMetadata. +type SetMetadataResponse = container.SetMetadataResponse + +// SetAccessPolicyResponse contains the response from method FileSystemClient.SetAccessPolicy. +type SetAccessPolicyResponse = container.SetAccessPolicyResponse + +// ListPathsSegmentResponse contains the response from method FileSystemClient.ListPathsSegment. +type ListPathsSegmentResponse = generated.FileSystemClientListPathsResponse + +// UndeletePathResponse contains the response from method FileSystemClient.UndeletePath. +type UndeletePathResponse = generated.PathClientUndeleteResponse + +// ListDeletedPathsSegmentResponse contains the response from method FileSystemClient.ListPathsSegment. 
+type ListDeletedPathsSegmentResponse = generated.FileSystemClientListPathHierarchySegmentResponse + +// ListPathsHierarchySegmentResponse contains the response from method FileSystemClient.ListPathsHierarchySegment. +type ListPathsHierarchySegmentResponse = generated.ListPathsHierarchySegmentResponse + +// PathHierarchyListSegment contains the response from method FileSystemClient.ListPathsHierarchySegment. +type PathHierarchyListSegment = generated.PathHierarchyListSegment diff --git a/sdk/storage/azdatalake/go.mod b/sdk/storage/azdatalake/go.mod new file mode 100644 index 000000000000..e909b1dc0a62 --- /dev/null +++ b/sdk/storage/azdatalake/go.mod @@ -0,0 +1,28 @@ +module github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake + +go 1.18 + +require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 + github.com/stretchr/testify v1.7.1 +) + +require ( + github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.1.0 // indirect + github.com/golang-jwt/jwt v3.2.1+incompatible // indirect + github.com/google/uuid v1.1.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/sdk/storage/azdatalake/go.sum b/sdk/storage/azdatalake/go.sum new file mode 100644 index 000000000000..eafa822d8fa4 --- /dev/null +++ b/sdk/storage/azdatalake/go.sum @@ -0,0 +1,48 @@ 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= +github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= +github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= +golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/storage/azdatalake/internal/base/clients.go b/sdk/storage/azdatalake/internal/base/clients.go new file mode 100644 index 000000000000..e5b9e2a300c5 --- /dev/null +++ b/sdk/storage/azdatalake/internal/base/clients.go @@ -0,0 +1,93 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" +) + +// ClientOptions contains the optional parameters when creating a Client. 
+type ClientOptions struct { + azcore.ClientOptions + pipelineOptions *runtime.PipelineOptions +} + +func GetPipelineOptions(clOpts *ClientOptions) *runtime.PipelineOptions { + return clOpts.pipelineOptions +} + +func SetPipelineOptions(clOpts *ClientOptions, plOpts *runtime.PipelineOptions) { + clOpts.pipelineOptions = plOpts +} + +type CompositeClient[T, K, U any] struct { + // generated client with dfs + innerT *T + // generated client with blob + innerK *K + // blob client + innerU *U + sharedKey *exported.SharedKeyCredential + identityCred *azcore.TokenCredential + options *ClientOptions +} + +func InnerClients[T, K, U any](client *CompositeClient[T, K, U]) (*T, *K, *U) { + return client.innerT, client.innerK, client.innerU +} + +func SharedKeyComposite[T, K, U any](client *CompositeClient[T, K, U]) *exported.SharedKeyCredential { + return client.sharedKey +} + +func IdentityCredentialComposite[T, K, U any](client *CompositeClient[T, K, U]) *azcore.TokenCredential { + return client.identityCred +} + +func NewFileSystemClient(fsURL string, fsURLWithBlobEndpoint string, client *container.Client, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential, identityCred *azcore.TokenCredential, options *ClientOptions) *CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client] { + return &CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client]{ + innerT: generated.NewFileSystemClient(fsURL, azClient), + innerK: generated.NewFileSystemClient(fsURLWithBlobEndpoint, azClient), + sharedKey: sharedKey, + identityCred: identityCred, + innerU: client, + options: options, + } +} + +func NewServiceClient(serviceURL string, serviceURLWithBlobEndpoint string, client *service.Client, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential, identityCred *azcore.TokenCredential, options *ClientOptions) *CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client] { + return 
&CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client]{ + innerT: generated.NewServiceClient(serviceURL, azClient), + innerK: generated_blob.NewServiceClient(serviceURLWithBlobEndpoint, azClient), + sharedKey: sharedKey, + identityCred: identityCred, + innerU: client, + options: options, + } +} + +func NewPathClient(pathURL string, pathURLWithBlobEndpoint string, client *blockblob.Client, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential, identityCred *azcore.TokenCredential, options *ClientOptions) *CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client] { + return &CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client]{ + innerT: generated.NewPathClient(pathURL, azClient), + innerK: generated_blob.NewBlobClient(pathURLWithBlobEndpoint, azClient), + sharedKey: sharedKey, + identityCred: identityCred, + innerU: client, + options: options, + } +} + +func GetCompositeClientOptions[T, K, U any](client *CompositeClient[T, K, U]) *ClientOptions { + return client.options +} diff --git a/sdk/storage/azdatalake/internal/exported/access_conditions.go b/sdk/storage/azdatalake/internal/exported/access_conditions.go new file mode 100644 index 000000000000..e7b2a8ac7a3f --- /dev/null +++ b/sdk/storage/azdatalake/internal/exported/access_conditions.go @@ -0,0 +1,106 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" +) + +// AccessConditions identifies container-specific access conditions which you optionally set. 
+type AccessConditions struct { + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +// SourceAccessConditions identifies container-specific access conditions which you optionally set. +type SourceAccessConditions struct { + SourceModifiedAccessConditions *SourceModifiedAccessConditions + SourceLeaseAccessConditions *LeaseAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = generated.ModifiedAccessConditions + +// SourceModifiedAccessConditions contains a group of parameters for specifying access conditions of a source. +type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions + +// FormatContainerAccessConditions formats FileSystemAccessConditions into container's LeaseAccessConditions and ModifiedAccessConditions. 
+func FormatContainerAccessConditions(b *AccessConditions) *container.AccessConditions { + if b == nil { + return &container.AccessConditions{ + LeaseAccessConditions: &container.LeaseAccessConditions{}, + ModifiedAccessConditions: &container.ModifiedAccessConditions{}, + } + } + if b.LeaseAccessConditions == nil { + b.LeaseAccessConditions = &LeaseAccessConditions{} + } + if b.ModifiedAccessConditions == nil { + b.ModifiedAccessConditions = &ModifiedAccessConditions{} + } + return &container.AccessConditions{ + LeaseAccessConditions: &container.LeaseAccessConditions{ + LeaseID: b.LeaseAccessConditions.LeaseID, + }, + ModifiedAccessConditions: &container.ModifiedAccessConditions{ + IfMatch: b.ModifiedAccessConditions.IfMatch, + IfNoneMatch: b.ModifiedAccessConditions.IfNoneMatch, + IfModifiedSince: b.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: b.ModifiedAccessConditions.IfUnmodifiedSince, + }, + } +} + +// FormatPathAccessConditions formats PathAccessConditions into path's LeaseAccessConditions and ModifiedAccessConditions. +func FormatPathAccessConditions(p *AccessConditions) (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if p == nil { + return &generated.LeaseAccessConditions{}, &generated.ModifiedAccessConditions{} + } + if p.LeaseAccessConditions == nil { + p.LeaseAccessConditions = &LeaseAccessConditions{} + } + if p.ModifiedAccessConditions == nil { + p.ModifiedAccessConditions = &ModifiedAccessConditions{} + } + return &generated.LeaseAccessConditions{ + LeaseID: p.LeaseAccessConditions.LeaseID, + }, &generated.ModifiedAccessConditions{ + IfMatch: p.ModifiedAccessConditions.IfMatch, + IfNoneMatch: p.ModifiedAccessConditions.IfNoneMatch, + IfModifiedSince: p.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: p.ModifiedAccessConditions.IfUnmodifiedSince, + } +} + +// FormatBlobAccessConditions formats PathAccessConditions into blob's LeaseAccessConditions and ModifiedAccessConditions. 
+func FormatBlobAccessConditions(p *AccessConditions) *blob.AccessConditions { + if p == nil { + return &blob.AccessConditions{ + LeaseAccessConditions: &blob.LeaseAccessConditions{}, + ModifiedAccessConditions: &blob.ModifiedAccessConditions{}, + } + } + if p.LeaseAccessConditions == nil { + p.LeaseAccessConditions = &LeaseAccessConditions{} + } + if p.ModifiedAccessConditions == nil { + p.ModifiedAccessConditions = &ModifiedAccessConditions{} + } + return &blob.AccessConditions{LeaseAccessConditions: &blob.LeaseAccessConditions{ + LeaseID: p.LeaseAccessConditions.LeaseID, + }, ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfMatch: p.ModifiedAccessConditions.IfMatch, + IfNoneMatch: p.ModifiedAccessConditions.IfNoneMatch, + IfModifiedSince: p.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: p.ModifiedAccessConditions.IfUnmodifiedSince, + }} +} diff --git a/sdk/storage/azdatalake/internal/exported/access_policy.go b/sdk/storage/azdatalake/internal/exported/access_policy.go new file mode 100644 index 000000000000..14c293cf6569 --- /dev/null +++ b/sdk/storage/azdatalake/internal/exported/access_policy.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "fmt" +) + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the access policy permission string for an Azure Storage container. +// Call this method to set AccessPolicy's Permission field. 
+func (p *AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. +func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/sdk/storage/azdatalake/internal/exported/exported.go b/sdk/storage/azdatalake/internal/exported/exported.go new file mode 100644 index 000000000000..c6bbc6fe428d --- /dev/null +++ b/sdk/storage/azdatalake/internal/exported/exported.go @@ -0,0 +1,52 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "strconv" + "strings" +) + +const SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange = blob.HTTPRange + +// FormatHTTPRange converts an HTTPRange to its string format. 
+func FormatHTTPRange(r HTTPRange) *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} + +func ConvertToDFSError(err error) error { + if err == nil { + return nil + } + var responseErr *azcore.ResponseError + isRespErr := errors.As(err, &responseErr) + if isRespErr { + responseErr.ErrorCode = strings.Replace(responseErr.ErrorCode, "blob", "path", -1) + responseErr.ErrorCode = strings.Replace(responseErr.ErrorCode, "Blob", "Path", -1) + responseErr.ErrorCode = strings.Replace(responseErr.ErrorCode, "container", "filesystem", -1) + responseErr.ErrorCode = strings.Replace(responseErr.ErrorCode, "Container", "FileSystem", -1) + return responseErr + } + return err +} diff --git a/sdk/storage/azdatalake/internal/exported/log_events.go b/sdk/storage/azdatalake/internal/exported/log_events.go new file mode 100644 index 000000000000..4a9c3cafd628 --- /dev/null +++ b/sdk/storage/azdatalake/internal/exported/log_events.go @@ -0,0 +1,20 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NOTE: these are publicly exported via type-aliasing in azdatalake/log.go +const ( + // EventUpload is used when we compute number of chunks to upload and size of each chunk. + EventUpload log.Event = "azdatalake.Upload" + + // EventError is used for logging errors. 
+ EventError log.Event = "azdatalake.Error" +) diff --git a/sdk/storage/azdatalake/internal/exported/shared_key_credential.go b/sdk/storage/azdatalake/internal/exported/shared_key_credential.go new file mode 100644 index 000000000000..e75b29f0ad8b --- /dev/null +++ b/sdk/storage/azdatalake/internal/exported/shared_key_credential.go @@ -0,0 +1,228 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "net/http" + "net/url" + "sort" + "strings" + "sync/atomic" + "time" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" +) + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) { + c := SharedKeyCredential{accountName: accountName, accountKeyString: accountKey} + if err := c.SetAccountKey(accountKey); err != nil { + return nil, err + } + return &c, nil +} + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential struct { + // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only + accountName string + accountKey atomic.Value // []byte + accountKeyString string +} + +// AccountName returns the Storage account's name. 
+func (c *SharedKeyCredential) AccountName() string { + return c.accountName +} + +func ConvertToBlobSharedKey(c *SharedKeyCredential) (*azblob.SharedKeyCredential, error) { + cred, err := azblob.NewSharedKeyCredential(c.accountName, c.accountKeyString) + if err != nil { + return nil, err + } + return cred, nil +} + +// SetAccountKey replaces the existing account key with the specified account key. +func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { + _bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return fmt.Errorf("decode account key: %w", err) + } + c.accountKey.Store(_bytes) + return nil +} + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := getHeader(shared.HeaderContentLength, headers) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), + contentLength, + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), + "", // Empty date because x-ms-date is expected (as per web page above) + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + 
getHeader(shared.HeaderRange, headers), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", fmt.Errorf("failed to parse query params: %w", err) + } + + if len(params) > 0 { // There is at least 1 query parameter + var paramNames []string // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} + +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. 
+func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) {
+	return cred.computeHMACSHA256(message)
+}
+
+// SharedKeyCredPolicy is kept in this internal package (not the module's public API)
+// next to SharedKeyCredential as it uses its unexported methods
+
+type SharedKeyCredPolicy struct {
+	cred *SharedKeyCredential
+}
+
+func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy {
+	return &SharedKeyCredPolicy{cred: cred}
+}
+
+func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
+	if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" {
+		req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat))
+	}
+	stringToSign, err := s.cred.buildStringToSign(req.Raw())
+	if err != nil {
+		return nil, err
+	}
+	signature, err := s.cred.computeHMACSHA256(stringToSign)
+	if err != nil {
+		return nil, err
+	}
+	authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "")
+	req.Raw().Header.Set(shared.HeaderAuthorization, authHeader)
+
+	response, err := req.Next()
+	if err != nil && response != nil && response.StatusCode == http.StatusForbidden {
+		// Service failed to authenticate request, log it
+		log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
+	}
+	return response, err
+}
diff --git a/sdk/storage/azdatalake/internal/exported/transfer_validation_option.go b/sdk/storage/azdatalake/internal/exported/transfer_validation_option.go
new file mode 100644
index 000000000000..85430ebd1c7e
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/exported/transfer_validation_option.go
@@ -0,0 +1,56 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+ +package exported + +import ( + "bytes" + "encoding/binary" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "hash/crc64" + "io" +) + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType interface { + Apply(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + notPubliclyImplementable() +} + +// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. +type TransferValidationTypeCRC64 uint64 + +func (c TransferValidationTypeCRC64) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, uint64(c)) + cfg.SetCRC64(buf) + return rsc, nil +} + +func (TransferValidationTypeCRC64) notPubliclyImplementable() {} + +// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. 
+func TransferValidationTypeComputeCRC64() TransferValidationType { + return transferValidationTypeFn(func(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + buf, err := io.ReadAll(rsc) + if err != nil { + return nil, err + } + + crc := crc64.Checksum(buf, shared.CRC64Table) + return TransferValidationTypeCRC64(crc).Apply(streaming.NopCloser(bytes.NewReader(buf)), cfg) + }) +} + +type transferValidationTypeFn func(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + +func (t transferValidationTypeFn) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + return t(rsc, cfg) +} + +func (transferValidationTypeFn) notPubliclyImplementable() {} diff --git a/sdk/storage/azdatalake/internal/exported/user_delegation_credential.go b/sdk/storage/azdatalake/internal/exported/user_delegation_credential.go new file mode 100644 index 000000000000..a2a362d883a7 --- /dev/null +++ b/sdk/storage/azdatalake/internal/exported/user_delegation_credential.go @@ -0,0 +1,64 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" +) + +// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's Name and a user delegation Key from it +func NewUserDelegationCredential(accountName string, udk UserDelegationKey) *UserDelegationCredential { + return &UserDelegationCredential{ + accountName: accountName, + userDelegationKey: udk, + } +} + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated_blob.UserDelegationKey + +// UserDelegationCredential contains an account's name and its user delegation key. 
+type UserDelegationCredential struct { + accountName string + userDelegationKey UserDelegationKey +} + +// getAccountName returns the Storage account's Name +func (f *UserDelegationCredential) getAccountName() string { + return f.accountName +} + +// GetAccountName is a helper method for accessing the user delegation key parameters outside this package. +func GetAccountName(udc *UserDelegationCredential) string { + return udc.getAccountName() +} + +// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, error) { + bytes, _ := base64.StdEncoding.DecodeString(*f.userDelegationKey.Value) + h := hmac.New(sha256.New, bytes) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside this package. +func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) { + return udc.computeHMACSHA256(message) +} + +// getUDKParams returns UserDelegationKey +func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey { + return &f.userDelegationKey +} + +// GetUDKParams is a helper method for accessing the user delegation key parameters outside this package. +func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey { + return udc.getUDKParams() +} diff --git a/sdk/storage/azdatalake/internal/exported/version.go b/sdk/storage/azdatalake/internal/exported/version.go new file mode 100644 index 000000000000..8fdb8eaff91d --- /dev/null +++ b/sdk/storage/azdatalake/internal/exported/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +const ( + ModuleName = "azdatalake" + ModuleVersion = "v0.1.0-beta.1" +) diff --git a/sdk/storage/azdatalake/internal/generated/autorest.md b/sdk/storage/azdatalake/internal/generated/autorest.md new file mode 100644 index 000000000000..edc674717865 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated/autorest.md @@ -0,0 +1,279 @@ +# Code Generation - Azure Datalake SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Azure.Storage.Files.DataLake/preview/2020-10-02/DataLakeStorage.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: ../generated +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.49" +``` + +### Remove FileSystem and PathName from parameter list since they are not needed +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{filesystem}/{path}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem") && false == param['$ref'].endsWith("#/parameters/Path"))}); + } + else if (property.includes('/{filesystem}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem"))}); + } + } +``` + +### Remove pager methods and export various generated methods in filesystem client + +``` yaml +directive: + - from: zz_filesystem_client.go + where: $ 
+ transform: >- + return $. + replace(/func \(client \*FileSystemClient\) NewListBlobHierarchySegmentPager\(.+\/\/ listBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request/s, `//\n// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request`). + replace(/\(client \*FileSystemClient\) listBlobHierarchySegmentCreateRequest\(/, `(client *FileSystemClient) ListBlobHierarchySegmentCreateRequest(`). + replace(/\(client \*FileSystemClient\) listBlobHierarchySegmentHandleResponse\(/, `(client *FileSystemClient) ListBlobHierarchySegmentHandleResponse(`); +``` + +### Remove pager methods and export various generated methods in filesystem client + +``` yaml +directive: + - from: zz_filesystem_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*FileSystemClient\) NewListPathsPager\(.+\/\/ listPathsCreateRequest creates the ListPaths request/s, `//\n// ListPathsCreateRequest creates the ListPaths request`). + replace(/\(client \*FileSystemClient\) listPathsCreateRequest\(/, `(client *FileSystemClient) ListPathsCreateRequest(`). + replace(/\(client \*FileSystemClient\) listPathsHandleResponse\(/, `(client *FileSystemClient) ListPathsHandleResponse(`); +``` + +### Remove pager methods and export various generated methods in service client + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListFileSystemsPager\(.+\/\/ listFileSystemsCreateRequest creates the ListFileSystems request/s, `//\n// ListFileSystemsCreateRequest creates the ListFileSystems request`). + replace(/\(client \*ServiceClient\) listFileSystemsCreateRequest\(/, `(client *ServiceClient) ListFileSystemsCreateRequest(`). 
+ replace(/\(client \*ServiceClient\) listFileSystemsHandleResponse\(/, `(client *ServiceClient) ListFileSystemsHandleResponse(`); +``` + + +### Remove pager methods and export various generated methods in path client + +``` yaml +directive: + - from: zz_path_client.go + where: $ + transform: >- + return $. + replace(/\(client \*PathClient\) setAccessControlRecursiveCreateRequest\(/, `(client *PathClient) SetAccessControlRecursiveCreateRequest(`). + replace(/\(client \*PathClient\) setAccessControlRecursiveHandleResponse\(/, `(client *PathClient) SetAccessControlRecursiveHandleResponse(`). + replace(/setAccessControlRecursiveCreateRequest/g, 'SetAccessControlRecursiveCreateRequest'). + replace(/setAccessControlRecursiveHandleResponse/g, 'SetAccessControlRecursiveHandleResponse'); +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Clean up some const type names so they don't stutter + +``` yaml +directive: +- from: swagger-document + where: $.parameters['PathExpiryOptions'] + transform: > + $["x-ms-enum"].name = "ExpiryOptions"; + $["x-ms-client-name"].name = "ExpiryOptions"; + +``` + +### use azcore.ETag + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). + replace(/Etag\s+\*string/g, `ETag *azcore.ETag`). + replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`). + replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`). + replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). + replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); + +- from: zz_response_types.go + where: $ + transform: >- + return $. + replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). 
+ replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_filesystem_client.go + - zz_path_client.go + where: $ + transform: >- + return $. + replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). + replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`). + replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`). + replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`); + +``` + +### Fix up x-ms-content-crc64 header response name + +``` yaml +directive: +- from: swagger-document + where: $.x-ms-paths.*.*.responses.*.headers.x-ms-content-crc64 + transform: > + $["x-ms-client-name"] = "ContentCRC64" +``` + +### Updating encoding URL, Golang adds '+' which disrupts encoding with service + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`) +``` + +### Change `Duration` parameter in leases to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.LeaseDuration + transform: > + $.required = true; +``` + +### Change CPK acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cpk/g, "CPK"); +``` + +### Change CORS acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. 
+ replace(/Cors/g, "CORS"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers + +``` yaml +directive: +- from: + - zz_filesystem_client.go + - zz_path.go + where: $ + transform: >- + return $. + replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). 
+ replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`); + +``` + +### Change container prefix to filesystem +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/PublicAccessTypeBlob/g, 'PublicAccessTypeFile'). + replace(/PublicAccessTypeContainer/g, 'PublicAccessTypeFileSystem'). + replace(/FileSystemClientListBlobHierarchySegmentResponse/g, 'FileSystemClientListPathHierarchySegmentResponse'). + replace(/ListBlobsHierarchySegmentResponse/g, 'ListPathsHierarchySegmentResponse'). + replace(/ContainerName\s*\*string/g, 'FileSystemName *string'). + replace(/BlobHierarchyListSegment/g, 'PathHierarchyListSegment'). + replace(/BlobItems/g, 'PathItems'). + replace(/BlobItem/g, 'PathItem'). + replace(/BlobPrefix/g, 'PathPrefix'). + replace(/BlobPrefixes/g, 'PathPrefixes'). + replace(/BlobProperties/g, 'PathProperties'). + replace(/ContainerProperties/g, 'FileSystemProperties'); +``` + +### +``` yaml +directive: +- from: + - zz_models_serde.go + where: $ + transform: >- + return $. + replace(/err = unpopulate\((.*), "ContentLength", &p\.ContentLength\)/g, 'var rawVal string\nerr = unpopulate(val, "ContentLength", &rawVal)\nintVal, _ := strconv.ParseInt(rawVal, 10, 64)\np.ContentLength = &intVal'). 
+      replace(/err = unpopulate\((.*), "IsDirectory", &p\.IsDirectory\)/g, 'var rawVal string\nerr = unpopulate(val, "IsDirectory", &rawVal)\nboolVal, _ := strconv.ParseBool(rawVal)\np.IsDirectory = &boolVal');
+```
\ No newline at end of file
diff --git a/sdk/storage/azdatalake/internal/generated/build.go b/sdk/storage/azdatalake/internal/generated/build.go
new file mode 100644
index 000000000000..57f112001bd2
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated/build.go
@@ -0,0 +1,10 @@
+//go:build go1.18
+// +build go1.18
+
+//go:generate autorest ./autorest.md
+//go:generate gofmt -w .
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package generated
diff --git a/sdk/storage/azdatalake/internal/generated/filesystem_client.go b/sdk/storage/azdatalake/internal/generated/filesystem_client.go
new file mode 100644
index 000000000000..35c7c2e8e8a8
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated/filesystem_client.go
@@ -0,0 +1,34 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package generated
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"time"
+)
+
+// used to convert times from UTC to GMT before sending across the wire
+var gmt = time.FixedZone("GMT", 0)
+
+func (client *FileSystemClient) Endpoint() string {
+	return client.endpoint
+}
+
+func (client *FileSystemClient) InternalClient() *azcore.Client {
+	return client.internal
+}
+
+// NewFileSystemClient creates a new instance of FileSystemClient with the specified values.
+// - endpoint - The URL of the service account, filesystem, directory or file that is the target of the desired operation.
+// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
+func NewFileSystemClient(endpoint string, azClient *azcore.Client) *FileSystemClient {
+	client := &FileSystemClient{
+		internal: azClient,
+		endpoint: endpoint,
+	}
+	return client
+}
diff --git a/sdk/storage/azdatalake/internal/generated/models.go b/sdk/storage/azdatalake/internal/generated/models.go
new file mode 100644
index 000000000000..b3f86d5973cb
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated/models.go
@@ -0,0 +1,15 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package generated
+
+type TransactionalContentSetter interface {
+	SetCRC64([]byte)
+}
+
+func (a *PathClientAppendDataOptions) SetCRC64(v []byte) {
+	a.TransactionalContentCRC64 = v
+}
diff --git a/sdk/storage/azdatalake/internal/generated/path_client.go b/sdk/storage/azdatalake/internal/generated/path_client.go
new file mode 100644
index 000000000000..e12c424816c9
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated/path_client.go
@@ -0,0 +1,30 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package generated
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+)
+
+func (client *PathClient) Endpoint() string {
+	return client.endpoint
+}
+
+func (client *PathClient) InternalClient() *azcore.Client {
+	return client.internal
+}
+
+// NewPathClient creates a new instance of PathClient with the specified values.
+// - endpoint - The URL of the service account, filesystem, directory or file that is the target of the desired operation.
+// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
+func NewPathClient(endpoint string, azClient *azcore.Client) *PathClient {
+	client := &PathClient{
+		internal: azClient,
+		endpoint: endpoint,
+	}
+	return client
+}
diff --git a/sdk/storage/azdatalake/internal/generated/service_client.go b/sdk/storage/azdatalake/internal/generated/service_client.go
new file mode 100644
index 000000000000..8c99594b7dbd
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated/service_client.go
@@ -0,0 +1,30 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package generated
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+)
+
+func (client *ServiceClient) Endpoint() string {
+	return client.endpoint
+}
+
+func (client *ServiceClient) InternalClient() *azcore.Client {
+	return client.internal
+}
+
+// NewServiceClient creates a new instance of ServiceClient with the specified values.
+// - endpoint - The URL of the service account, filesystem, directory or file that is the target of the desired operation.
+// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
+func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient {
+	client := &ServiceClient{
+		internal: azClient,
+		endpoint: endpoint,
+	}
+	return client
+}
diff --git a/sdk/storage/azdatalake/internal/generated/zz_constants.go b/sdk/storage/azdatalake/internal/generated/zz_constants.go
new file mode 100644
index 000000000000..bfb6f9ceee76
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated/zz_constants.go
@@ -0,0 +1,192 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +type EncryptionAlgorithmType string + +const ( + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{ + EncryptionAlgorithmTypeAES256, + EncryptionAlgorithmTypeNone, + } +} + +type ExpiryOptions string + +const ( + ExpiryOptionsAbsolute ExpiryOptions = "Absolute" + ExpiryOptionsNeverExpire ExpiryOptions = "NeverExpire" + ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation" + ExpiryOptionsRelativeToNow ExpiryOptions = "RelativeToNow" +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return []ExpiryOptions{ + ExpiryOptionsAbsolute, + ExpiryOptionsNeverExpire, + ExpiryOptionsRelativeToCreation, + ExpiryOptionsRelativeToNow, + } +} + +type ListBlobsIncludeItem string + +const ( + ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" + ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" + ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" + ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" +) + +// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. 
+func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { + return []ListBlobsIncludeItem{ + ListBlobsIncludeItemCopy, + ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemMetadata, + ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemTags, + ListBlobsIncludeItemUncommittedblobs, + ListBlobsIncludeItemVersions, + } +} + +type PathExpiryOptions string + +const ( + PathExpiryOptionsAbsolute PathExpiryOptions = "Absolute" + PathExpiryOptionsNeverExpire PathExpiryOptions = "NeverExpire" + PathExpiryOptionsRelativeToCreation PathExpiryOptions = "RelativeToCreation" + PathExpiryOptionsRelativeToNow PathExpiryOptions = "RelativeToNow" +) + +// PossiblePathExpiryOptionsValues returns the possible values for the PathExpiryOptions const type. +func PossiblePathExpiryOptionsValues() []PathExpiryOptions { + return []PathExpiryOptions{ + PathExpiryOptionsAbsolute, + PathExpiryOptionsNeverExpire, + PathExpiryOptionsRelativeToCreation, + PathExpiryOptionsRelativeToNow, + } +} + +type PathGetPropertiesAction string + +const ( + PathGetPropertiesActionGetAccessControl PathGetPropertiesAction = "getAccessControl" + PathGetPropertiesActionGetStatus PathGetPropertiesAction = "getStatus" +) + +// PossiblePathGetPropertiesActionValues returns the possible values for the PathGetPropertiesAction const type. +func PossiblePathGetPropertiesActionValues() []PathGetPropertiesAction { + return []PathGetPropertiesAction{ + PathGetPropertiesActionGetAccessControl, + PathGetPropertiesActionGetStatus, + } +} + +type PathLeaseAction string + +const ( + PathLeaseActionAcquire PathLeaseAction = "acquire" + PathLeaseActionBreak PathLeaseAction = "break" + PathLeaseActionChange PathLeaseAction = "change" + PathLeaseActionRelease PathLeaseAction = "release" + PathLeaseActionRenew PathLeaseAction = "renew" +) + +// PossiblePathLeaseActionValues returns the possible values for the PathLeaseAction const type. 
+func PossiblePathLeaseActionValues() []PathLeaseAction { + return []PathLeaseAction{ + PathLeaseActionAcquire, + PathLeaseActionBreak, + PathLeaseActionChange, + PathLeaseActionRelease, + PathLeaseActionRenew, + } +} + +type PathRenameMode string + +const ( + PathRenameModeLegacy PathRenameMode = "legacy" + PathRenameModePosix PathRenameMode = "posix" +) + +// PossiblePathRenameModeValues returns the possible values for the PathRenameMode const type. +func PossiblePathRenameModeValues() []PathRenameMode { + return []PathRenameMode{ + PathRenameModeLegacy, + PathRenameModePosix, + } +} + +type PathResourceType string + +const ( + PathResourceTypeDirectory PathResourceType = "directory" + PathResourceTypeFile PathResourceType = "file" +) + +// PossiblePathResourceTypeValues returns the possible values for the PathResourceType const type. +func PossiblePathResourceTypeValues() []PathResourceType { + return []PathResourceType{ + PathResourceTypeDirectory, + PathResourceTypeFile, + } +} + +type PathSetAccessControlRecursiveMode string + +const ( + PathSetAccessControlRecursiveModeModify PathSetAccessControlRecursiveMode = "modify" + PathSetAccessControlRecursiveModeRemove PathSetAccessControlRecursiveMode = "remove" + PathSetAccessControlRecursiveModeSet PathSetAccessControlRecursiveMode = "set" +) + +// PossiblePathSetAccessControlRecursiveModeValues returns the possible values for the PathSetAccessControlRecursiveMode const type. 
+func PossiblePathSetAccessControlRecursiveModeValues() []PathSetAccessControlRecursiveMode { + return []PathSetAccessControlRecursiveMode{ + PathSetAccessControlRecursiveModeModify, + PathSetAccessControlRecursiveModeRemove, + PathSetAccessControlRecursiveModeSet, + } +} + +type PathUpdateAction string + +const ( + PathUpdateActionAppend PathUpdateAction = "append" + PathUpdateActionFlush PathUpdateAction = "flush" + PathUpdateActionSetAccessControl PathUpdateAction = "setAccessControl" + PathUpdateActionSetAccessControlRecursive PathUpdateAction = "setAccessControlRecursive" + PathUpdateActionSetProperties PathUpdateAction = "setProperties" +) + +// PossiblePathUpdateActionValues returns the possible values for the PathUpdateAction const type. +func PossiblePathUpdateActionValues() []PathUpdateAction { + return []PathUpdateAction{ + PathUpdateActionAppend, + PathUpdateActionFlush, + PathUpdateActionSetAccessControl, + PathUpdateActionSetAccessControlRecursive, + PathUpdateActionSetProperties, + } +} diff --git a/sdk/storage/azdatalake/internal/generated/zz_filesystem_client.go b/sdk/storage/azdatalake/internal/generated/zz_filesystem_client.go new file mode 100644 index 000000000000..377a623ea155 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated/zz_filesystem_client.go @@ -0,0 +1,486 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" + "time" +) + +// FileSystemClient contains the methods for the FileSystem group. +// Don't use this type directly, use a constructor function instead. +type FileSystemClient struct { + internal *azcore.Client + endpoint string +} + +// Create - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the operation fails. This +// operation does not support conditional HTTP requests. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileSystemClientCreateOptions contains the optional parameters for the FileSystemClient.Create method. +func (client *FileSystemClient) Create(ctx context.Context, options *FileSystemClientCreateOptions) (FileSystemClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, options) + if err != nil { + return FileSystemClientCreateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FileSystemClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return FileSystemClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
func (client *FileSystemClient) createCreateRequest(ctx context.Context, options *FileSystemClientCreateOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	// resource=filesystem addresses the container-level (filesystem) resource.
	reqQP.Set("resource", "filesystem")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.Properties != nil {
		req.Raw().Header["x-ms-properties"] = []string{*options.Properties}
	}
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}

// createHandleResponse handles the Create response.
func (client *FileSystemClient) createHandleResponse(resp *http.Response) (FileSystemClientCreateResponse, error) {
	result := FileSystemClientCreateResponse{}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientCreateResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientCreateResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		// NOTE(review): the service request ID (x-ms-request-id) is stored in ClientRequestID here,
		// while the other handlers in this file store it in RequestID — confirm the
		// FileSystemClientCreateResponse model is intended to be populated this way.
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-namespace-enabled"); val != "" {
		result.NamespaceEnabled = &val
	}
	return result, nil
}

// Delete - Marks the FileSystem for deletion.
// When a FileSystem is deleted, a FileSystem with the same identifier cannot
// be created for at least 30 seconds. While the filesystem is being deleted, attempts to
// create a filesystem with the same identifier will fail with status code 409 (Conflict), with the service returning additional
// error information indicating that the filesystem is being deleted. All
// other operations, including operations on any files or directories within the filesystem, will fail with status code 404
// (Not Found) while the filesystem is being deleted. This operation supports
// conditional HTTP requests. For more information, see Specifying Conditional Headers for Blob Service Operations
// [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - FileSystemClientDeleteOptions contains the optional parameters for the FileSystemClient.Delete method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties
// method.
func (client *FileSystemClient) Delete(ctx context.Context, options *FileSystemClientDeleteOptions, modifiedAccessConditions *ModifiedAccessConditions) (FileSystemClientDeleteResponse, error) {
	req, err := client.deleteCreateRequest(ctx, options, modifiedAccessConditions)
	if err != nil {
		return FileSystemClientDeleteResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return FileSystemClientDeleteResponse{}, err
	}
	// 202 (Accepted) is the only expected status: deletion completes asynchronously on the service side.
	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
		return FileSystemClientDeleteResponse{}, runtime.NewResponseError(resp)
	}
	return client.deleteHandleResponse(resp)
}

// deleteCreateRequest creates the Delete request.
func (client *FileSystemClient) deleteCreateRequest(ctx context.Context, options *FileSystemClientDeleteOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("resource", "filesystem")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	// Only the date-based conditionals are sent by this operation; the IfMatch/IfNoneMatch
	// fields of ModifiedAccessConditions are not used here.
	// "gmt" is presumably a package-level *time.Location declared elsewhere in this package — confirm.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}

// deleteHandleResponse handles the Delete response.
func (client *FileSystemClient) deleteHandleResponse(resp *http.Response) (FileSystemClientDeleteResponse, error) {
	result := FileSystemClientDeleteResponse{}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientDeleteResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// GetProperties - All system and user-defined filesystem properties are specified in the response headers.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - FileSystemClientGetPropertiesOptions contains the optional parameters for the FileSystemClient.GetProperties
// method.
func (client *FileSystemClient) GetProperties(ctx context.Context, options *FileSystemClientGetPropertiesOptions) (FileSystemClientGetPropertiesResponse, error) {
	req, err := client.getPropertiesCreateRequest(ctx, options)
	if err != nil {
		return FileSystemClientGetPropertiesResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return FileSystemClientGetPropertiesResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return FileSystemClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
	}
	return client.getPropertiesHandleResponse(resp)
}

// getPropertiesCreateRequest creates the GetProperties request.
func (client *FileSystemClient) getPropertiesCreateRequest(ctx context.Context, options *FileSystemClientGetPropertiesOptions) (*policy.Request, error) {
	// HEAD request: all result data is returned in response headers; there is no body.
	req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("resource", "filesystem")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}

// getPropertiesHandleResponse handles the GetProperties response.
func (client *FileSystemClient) getPropertiesHandleResponse(resp *http.Response) (FileSystemClientGetPropertiesResponse, error) {
	result := FileSystemClientGetPropertiesResponse{}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientGetPropertiesResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientGetPropertiesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	// User-defined filesystem properties come back as a single x-ms-properties header value.
	if val := resp.Header.Get("x-ms-properties"); val != "" {
		result.Properties = &val
	}
	if val := resp.Header.Get("x-ms-namespace-enabled"); val != "" {
		result.NamespaceEnabled = &val
	}
	return result, nil
}

// NewListBlobHierarchySegmentPager - The List Blobs operation returns a list of the blobs under the specified container
//
// Generated from API version 2020-10-02
// - options - FileSystemClientListBlobHierarchySegmentOptions contains the optional parameters for the FileSystemClient.NewListBlobHierarchySegmentPager
// method.
//
// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request.
func (client *FileSystemClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, options *FileSystemClientListBlobHierarchySegmentOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	// This listing goes through the blob endpoint (restype=container&comp=list),
	// not the dfs "resource=filesystem" endpoint used by the other operations in this file.
	reqQP.Set("restype", "container")
	reqQP.Set("comp", "list")
	if options != nil && options.Prefix != nil {
		reqQP.Set("prefix", *options.Prefix)
	}
	if options != nil && options.Delimiter != nil {
		reqQP.Set("delimiter", *options.Delimiter)
	}
	if options != nil && options.Marker != nil {
		reqQP.Set("marker", *options.Marker)
	}
	if options != nil && options.MaxResults != nil {
		// NOTE(review): the Blob service "List Blobs" API documents this parameter as
		// lower-case "maxresults"; confirm the service accepts the mixed-case form used here.
		reqQP.Set("maxResults", strconv.FormatInt(int64(*options.MaxResults), 10))
	}
	if options != nil && options.Include != nil {
		// Renders the []ListBlobsIncludeItem slice as a comma-separated list.
		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
	}
	if options != nil && options.Showonly != nil {
		// Any non-nil Showonly value forces "deleted"; the pointed-to string itself is ignored.
		reqQP.Set("showonly", "deleted")
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	// The blob listing responds with XML (unlike the JSON dfs operations above).
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response.
func (client *FileSystemClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (FileSystemClientListPathHierarchySegmentResponse, error) {
	result := FileSystemClientListPathHierarchySegmentResponse{}
	if val := resp.Header.Get("Content-Type"); val != "" {
		result.ContentType = &val
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientListPathHierarchySegmentResponse{}, err
		}
		result.Date = &date
	}
	// The listing body is XML (blob endpoint schema), decoded into the embedded segment response.
	if err := runtime.UnmarshalAsXML(resp, &result.ListPathsHierarchySegmentResponse); err != nil {
		return FileSystemClientListPathHierarchySegmentResponse{}, err
	}
	return result, nil
}

// NewListPathsPager - List FileSystem paths and their properties.
//
// Generated from API version 2020-10-02
// - recursive - Required
// - options - FileSystemClientListPathsOptions contains the optional parameters for the FileSystemClient.NewListPathsPager
// method.
//
// ListPathsCreateRequest creates the ListPaths request.
func (client *FileSystemClient) ListPathsCreateRequest(ctx context.Context, recursive bool, options *FileSystemClientListPathsOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("resource", "filesystem")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	if options != nil && options.Continuation != nil {
		reqQP.Set("continuation", *options.Continuation)
	}
	if options != nil && options.Path != nil {
		// The service query parameter is named "directory" even though the option field is Path.
		reqQP.Set("directory", *options.Path)
	}
	// recursive is a required parameter of the List Paths operation and is always sent.
	reqQP.Set("recursive", strconv.FormatBool(recursive))
	if options != nil && options.MaxResults != nil {
		reqQP.Set("maxResults", strconv.FormatInt(int64(*options.MaxResults), 10))
	}
	if options != nil && options.Upn != nil {
		reqQP.Set("upn", strconv.FormatBool(*options.Upn))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}

// ListPathsHandleResponse handles the ListPaths response.
func (client *FileSystemClient) ListPathsHandleResponse(resp *http.Response) (FileSystemClientListPathsResponse, error) {
	result := FileSystemClientListPathsResponse{}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientListPathsResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientListPathsResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	// x-ms-continuation carries the token for fetching the next page, when present.
	if val := resp.Header.Get("x-ms-continuation"); val != "" {
		result.Continuation = &val
	}
	// The dfs endpoint returns the path listing as JSON.
	if err := runtime.UnmarshalAsJSON(resp, &result.PathList); err != nil {
		return FileSystemClientListPathsResponse{}, err
	}
	return result, nil
}

// SetProperties - Set properties for the FileSystem. This operation supports conditional HTTP requests. For more information,
// see Specifying Conditional Headers for Blob Service Operations
// [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - FileSystemClientSetPropertiesOptions contains the optional parameters for the FileSystemClient.SetProperties
// method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties
// method.
func (client *FileSystemClient) SetProperties(ctx context.Context, options *FileSystemClientSetPropertiesOptions, modifiedAccessConditions *ModifiedAccessConditions) (FileSystemClientSetPropertiesResponse, error) {
	req, err := client.setPropertiesCreateRequest(ctx, options, modifiedAccessConditions)
	if err != nil {
		return FileSystemClientSetPropertiesResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return FileSystemClientSetPropertiesResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return FileSystemClientSetPropertiesResponse{}, runtime.NewResponseError(resp)
	}
	return client.setPropertiesHandleResponse(resp)
}

// setPropertiesCreateRequest creates the SetProperties request.
func (client *FileSystemClient) setPropertiesCreateRequest(ctx context.Context, options *FileSystemClientSetPropertiesOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPatch, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("resource", "filesystem")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.Properties != nil {
		req.Raw().Header["x-ms-properties"] = []string{*options.Properties}
	}
	// Only the date-based conditionals are sent by this operation; the IfMatch/IfNoneMatch
	// fields of ModifiedAccessConditions are not used here.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}

// setPropertiesHandleResponse handles the SetProperties response.
func (client *FileSystemClient) setPropertiesHandleResponse(resp *http.Response) (FileSystemClientSetPropertiesResponse, error) {
	result := FileSystemClientSetPropertiesResponse{}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientSetPropertiesResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientSetPropertiesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	return result, nil
}
diff --git a/sdk/storage/azdatalake/internal/generated/zz_models.go b/sdk/storage/azdatalake/internal/generated/zz_models.go
new file mode 100644
index 000000000000..6201c56fa6d4
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated/zz_models.go
@@ -0,0 +1,698 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package generated

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"time"
)

// ACLFailedEntry - a path entry (Name/Type) together with the error message reported for it;
// presumably produced by recursive access-control operations — confirm against the service docs.
type ACLFailedEntry struct {
	ErrorMessage *string
	Name         *string
	Type         *string
}

type PathHierarchyListSegment struct {
	// REQUIRED
	PathItems    []*PathItemInternal `xml:"Blob"`
	PathPrefixes []*PathPrefix       `xml:"PathPrefix"`
}

// PathItemInternal - An Azure Storage blob
// (the hierarchy listing reuses the Blob service XML schema, hence the "Blob" element names).
type PathItemInternal struct {
	// REQUIRED
	Deleted *bool `xml:"Deleted"`

	// REQUIRED
	Name *string `xml:"Name"`

	// REQUIRED; Properties of a blob
	Properties *PathPropertiesInternal `xml:"Properties"`

	// REQUIRED
	Snapshot         *string `xml:"Snapshot"`
	DeletionID       *string `xml:"DeletionId"`
	IsCurrentVersion *bool   `xml:"IsCurrentVersion"`
	VersionID        *string `xml:"VersionId"`
}

type PathPrefix struct {
	// REQUIRED
	Name *string `xml:"Name"`
}

// PathPropertiesInternal - Properties of a blob
type PathPropertiesInternal struct {
	// REQUIRED
	ETag *azcore.ETag `xml:"Etag"`

	// REQUIRED
	LastModified         *time.Time `xml:"Last-Modified"`
	AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"`
	AccessTierInferred   *bool      `xml:"AccessTierInferred"`
	BlobSequenceNumber   *int64     `xml:"x-ms-blob-sequence-number"`
	CacheControl         *string    `xml:"Cache-Control"`
	ContentDisposition   *string    `xml:"Content-Disposition"`
	ContentEncoding      *string    `xml:"Content-Encoding"`
	ContentLanguage      *string    `xml:"Content-Language"`

	// Size in bytes
	ContentLength             *int64     `xml:"Content-Length"`
	ContentMD5                []byte     `xml:"Content-MD5"`
	ContentType               *string    `xml:"Content-Type"`
	CopyCompletionTime        *time.Time `xml:"CopyCompletionTime"`
	CopyID                    *string    `xml:"CopyId"`
	CopyProgress              *string    `xml:"CopyProgress"`
	CopySource                *string    `xml:"CopySource"`
	CopyStatusDescription     *string    `xml:"CopyStatusDescription"`
	CreationTime              *time.Time `xml:"Creation-Time"`
	CustomerProvidedKeySHA256 *string    `xml:"CustomerProvidedKeySha256"`
	DeleteTime                *time.Time `xml:"DeleteTime"`
	DeletedTime               *time.Time `xml:"DeletedTime"`
	DestinationSnapshot       *string    `xml:"DestinationSnapshot"`

	// The name of the encryption scope under which the blob is encrypted.
	EncryptionScope        *string    `xml:"EncryptionScope"`
	ExpiresOn              *time.Time `xml:"Expiry-Time"`
	IncrementalCopy        *bool      `xml:"IncrementalCopy"`
	IsSealed               *bool      `xml:"Sealed"`
	LastAccessedOn         *time.Time `xml:"LastAccessTime"`
	RemainingRetentionDays *int32     `xml:"RemainingRetentionDays"`
	ServerEncrypted        *bool      `xml:"ServerEncrypted"`
	TagCount               *int32     `xml:"TagCount"`
}

// CPKInfo contains a group of parameters for the PathClient.Create method.
type CPKInfo struct {
	// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
	// if the x-ms-encryption-key header is provided.
	EncryptionAlgorithm *EncryptionAlgorithmType
	// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
	// is performed with the root account encryption key. For more information, see
	// Encryption at Rest for Azure Storage Services.
	EncryptionKey *string
	// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
	EncryptionKeySHA256 *string
}

type FileSystem struct {
	ETag         *string
	LastModified *string
	Name         *string
}

// FileSystemClientCreateOptions contains the optional parameters for the FileSystemClient.Create method.
type FileSystemClientCreateOptions struct {
	// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and
	// value pairs "n1=v1, n2=v2, …", where each value is a base64 encoded string. Note
	// that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties
	// not included in the list will be removed. All properties are removed if the
	// header is omitted.
	// To merge new and existing properties, first get all existing properties and the current E-Tag, then
	// make a conditional request with the E-Tag and include values for all properties.
	Properties *string
	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
	// analytics logging is enabled.
	RequestID *string
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
	Timeout *int32
}

// FileSystemClientDeleteOptions contains the optional parameters for the FileSystemClient.Delete method.
type FileSystemClientDeleteOptions struct {
	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
	// analytics logging is enabled.
	RequestID *string
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
	Timeout *int32
}

// FileSystemClientGetPropertiesOptions contains the optional parameters for the FileSystemClient.GetProperties method.
type FileSystemClientGetPropertiesOptions struct {
	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
	// analytics logging is enabled.
	RequestID *string
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
	Timeout *int32
}

// FileSystemClientListBlobHierarchySegmentOptions contains the optional parameters for the FileSystemClient.NewListBlobHierarchySegmentPager
// method.
type FileSystemClientListBlobHierarchySegmentOptions struct {
	// When the request includes this parameter, the operation returns a PathPrefix element in the response body that acts as
	// a placeholder for all blobs whose names begin with the same substring up to the
	// appearance of the delimiter character. The delimiter may be a single character or a string.
	Delimiter *string
	// Include this parameter to specify one or more datasets to include in the response.
	Include []ListBlobsIncludeItem
	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
	// operation returns the NextMarker value within the response body if the listing
	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
	// as the value for the marker parameter in a subsequent call to request the next
	// page of list items. The marker value is opaque to the client.
	Marker *string
	// An optional value that specifies the maximum number of items to return. If omitted or greater than 5,000, the response
	// will include up to 5,000 items.
	MaxResults *int32
	// Filters results to filesystems within the specified prefix.
	Prefix *string
	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
	// analytics logging is enabled.
	RequestID *string
	// Include this parameter to specify one or more datasets to include in the response.. Specifying any value will set the value
	// to deleted.
	Showonly *string
	// The timeout parameter is expressed in seconds.
	// For more information, see Setting Timeouts for Blob Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
	Timeout *int32
}

// FileSystemClientListPathsOptions contains the optional parameters for the FileSystemClient.NewListPathsPager method.
type FileSystemClientListPathsOptions struct {
	// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number
	// of paths to be deleted exceeds this limit, a continuation token is returned in
	// this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation
	// of the delete operation to continue deleting the directory.
	Continuation *string
	// An optional value that specifies the maximum number of items to return. If omitted or greater than 5,000, the response
	// will include up to 5,000 items.
	MaxResults *int32
	// Optional. Filters results to paths within the specified directory. An error occurs if the directory does not exist.
	Path *string
	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
	// analytics logging is enabled.
	RequestID *string
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
	Timeout *int32
	// Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true", the user identity values returned
	// in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
	// transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be returned as
	// Azure Active Directory Object IDs. The default value is false. Note that group
	// and application Object IDs are not translated because they do not have unique friendly names.
	Upn *bool
}

// FileSystemClientSetPropertiesOptions contains the optional parameters for the FileSystemClient.SetProperties method.
type FileSystemClientSetPropertiesOptions struct {
	// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and
	// value pairs "n1=v1, n2=v2, …", where each value is a base64 encoded string. Note
	// that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties
	// not included in the list will be removed. All properties are removed if the
	// header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then
	// make a conditional request with the E-Tag and include values for all properties.
	Properties *string
	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
	// analytics logging is enabled.
	RequestID *string
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
	Timeout *int32
}

type FileSystemList struct {
	Filesystems []*FileSystem
}

// LeaseAccessConditions contains a group of parameters for the PathClient.Create method.
type LeaseAccessConditions struct {
	// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ LeaseID *string +} + +// ListPathsHierarchySegmentResponse - An enumeration of blobs +type ListPathsHierarchySegmentResponse struct { + // REQUIRED + FileSystemName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *PathHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +type Path struct { + ContentLength *int64 + CreationTime *string + ETag *string + + // The name of the encryption scope under which the blob is encrypted. + EncryptionScope *string + ExpiryTime *string + Group *string + IsDirectory *bool + LastModified *string + Name *string + Owner *string + Permissions *string +} + +// PathClientAppendDataOptions contains the optional parameters for the PathClient.AppendData method. +type PathClientAppendDataOptions struct { + // Required for "Append Data" and "Flush Data". Must be 0 for "Flush Data". Must be the length of the request content in bytes + // for "Append Data". + ContentLength *int64 + // This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file. 
+ // It is required when uploading data to be appended to the file and when flushing
+ // previously uploaded data to the file. The value must be the position where the data is to be appended. Uploaded data is
+ // not immediately flushed, or written, to the file. To flush, the previously
+ // uploaded data must be contiguous, the position parameter must be specified and equal to the length of the file after all
+ // data has been written, and there must not be a request entity body included
+ // with the request.
+ Position *int64
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+}
+
+// PathClientCreateOptions contains the optional parameters for the PathClient.Create method.
+type PathClientCreateOptions struct {
+ // Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries.
+ // Each access control entry (ACE) consists of a scope, a type, a user or group
+ // identifier, and permissions in the format "[scope:][type]:[id]:[permissions]".
+ ACL *string
+ // Optional. When renaming a directory, the number of paths that are renamed with each invocation is limited. If the number
+ // of paths to be renamed exceeds this limit, a continuation token is returned in
+ // this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation
+ // of the rename operation to continue renaming the directory. 
+ Continuation *string + // The time to set the blob to expiry + ExpiresOn *string + // Required. Indicates mode of the expiry time + ExpiryOptions *PathExpiryOptions + // Optional. The owning group of the blob or directory. + Group *string + // The lease duration is required to acquire a lease, and specifies the duration of the lease in seconds. The lease duration + // must be between 15 and 60 seconds or -1 for infinite lease. + LeaseDuration *int64 + // Optional. Valid only when namespace is enabled. This parameter determines the behavior of the rename operation. The value + // must be "legacy" or "posix", and the default value will be "posix". + Mode *PathRenameMode + // Optional. The owner of the blob or directory. + Owner *string + // Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file + // owner, the file owning group, and others. Each class may be granted read, + // write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + // 0766) are supported. + Permissions *string + // Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and + // value pairs "n1=v1, n2=v2, …", where each value is a base64 encoded string. Note + // that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties + // not included in the list will be removed. All properties are removed if the + // header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then + // make a conditional request with the E-Tag and include values for all properties. + Properties *string + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. 
See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // An optional file or directory to be renamed. The value must have the following format: "/{filesystem}/{path}". If "x-ms-properties" + // is specified, the properties will overwrite the existing properties; + // otherwise, the existing properties will be preserved. This value must be a URL percent-encoded string. Note that the string + // may only contain ASCII characters in the ISO-8859-1 character set. + RenameSource *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Required only for Create File and Create Directory. The value must be "file" or "directory". + Resource *PathResourceType + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the + // parent folder does not have a default ACL, the umask restricts the permissions + // of the file or directory to be created. The resulting permission is given by p bitwise and not u, where p is the permission + // and u is the umask. For example, if p is 0777 and u is 0057, then the + // resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is + // 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). + Umask *string +} + +// PathClientDeleteOptions contains the optional parameters for the PathClient.Delete method. 
+type PathClientDeleteOptions struct { + // Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number + // of paths to be deleted exceeds this limit, a continuation token is returned in + // this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation + // of the delete operation to continue deleting the directory. + Continuation *string + // Required + Recursive *bool + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PathClientFlushDataOptions contains the optional parameters for the PathClient.FlushData method. +type PathClientFlushDataOptions struct { + // Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled, + // a file changed event is raised. This event has a property indicating whether + // this is the final change to distinguish the difference between an intermediate flush to a file stream and the final close + // of a file stream. The close query parameter is valid only when the action is + // "flush" and change notifications are enabled. If the value of close is "true" and the flush operation completes successfully, + // the service raises a file change notification with a property indicating + // that this is the final update (the file stream has been closed). If "false" a change notification is raised indicating + // the file has changed. The default is false. 
This query parameter is set to true
+ // by the Hadoop ABFS driver to indicate that the file stream has been closed.
+ Close *bool
+ // Required for "Append Data" and "Flush Data". Must be 0 for "Flush Data". Must be the length of the request content in bytes
+ // for "Append Data".
+ ContentLength *int64
+ // This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file.
+ // It is required when uploading data to be appended to the file and when flushing
+ // previously uploaded data to the file. The value must be the position where the data is to be appended. Uploaded data is
+ // not immediately flushed, or written, to the file. To flush, the previously
+ // uploaded data must be contiguous, the position parameter must be specified and equal to the length of the file after all
+ // data has been written, and there must not be a request entity body included
+ // with the request.
+ Position *int64
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Valid only for flush operations. If "true", uncommitted data is retained after the flush operation completes; otherwise,
+ // the uncommitted data is deleted after the flush operation. The default is
+ // false. Data at offsets less than the specified position are written to the file when flush succeeds, but this optional
+ // parameter allows data after the flush position to be retained for a future flush
+ // operation.
+ RetainUncommittedData *bool
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PathClientGetPropertiesOptions contains the optional parameters for the PathClient.GetProperties method. 
+type PathClientGetPropertiesOptions struct { + // Optional. If the value is "getStatus" only the system defined properties for the path are returned. If the value is "getAccessControl" + // the access control list is returned in the response headers + // (Hierarchical Namespace must be enabled for the account), otherwise the properties are returned. + Action *PathGetPropertiesAction + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true", the user identity values returned + // in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + // transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be returned as + // Azure Active Directory Object IDs. The default value is false. Note that group + // and application Object IDs are not translated because they do not have unique friendly names. + Upn *bool +} + +// PathClientLeaseOptions contains the optional parameters for the PathClient.Lease method. +type PathClientLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The lease break period duration is optional to break a lease, and specifies the break period of the lease in seconds. The + // lease break duration must be between 0 and 60 seconds. + XMSLeaseBreakPeriod *int32 +} + +// PathClientReadOptions contains the optional parameters for the PathClient.Read method. +type PathClientReadOptions struct { + // The HTTP Range request header specifies one or more byte ranges of the resource to be retrieved. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Optional. When this header is set to "true" and specified together with the Range header, the service returns the MD5 hash + // for the range, as long as the range is less than or equal to 4MB in size. If + // this header is specified without the Range header, the service returns status code 400 (Bad Request). If this header is + // set to true when the range exceeds 4 MB in size, the service returns status code + // 400 (Bad Request). + XMSRangeGetContentMD5 *bool +} + +// PathClientSetAccessControlOptions contains the optional parameters for the PathClient.SetAccessControl method. +type PathClientSetAccessControlOptions struct { + // Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. 
+ // Each access control entry (ACE) consists of a scope, a type, a user or group + // identifier, and permissions in the format "[scope:][type]:[id]:[permissions]". + ACL *string + // Optional. The owning group of the blob or directory. + Group *string + // Optional. The owner of the blob or directory. + Owner *string + // Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file + // owner, the file owning group, and others. Each class may be granted read, + // write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + // 0766) are supported. + Permissions *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PathClientSetAccessControlRecursiveOptions contains the optional parameters for the PathClient.SetAccessControlRecursive +// method. +type PathClientSetAccessControlRecursiveOptions struct { + // Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. + // Each access control entry (ACE) consists of a scope, a type, a user or group + // identifier, and permissions in the format "[scope:][type]:[id]:[permissions]". + ACL *string + // Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number + // of paths to be deleted exceeds this limit, a continuation token is returned in + // this response header. 
When a continuation token is returned in the response, it must be specified in a subsequent invocation + // of the delete operation to continue deleting the directory. + Continuation *string + // Optional. Valid for "SetAccessControlRecursive" operation. If set to false, the operation will terminate quickly on encountering + // user errors (4XX). If true, the operation will ignore user errors and + // proceed with the operation on other sub-entities of the directory. Continuation token will only be returned when forceFlag + // is true in case of user errors. If not set the default value is false for + // this. + ForceFlag *bool + // Optional. It specifies the maximum number of files or directories on which the acl change will be applied. If omitted or + // greater than 2,000, the request will process up to 2,000 items + MaxRecords *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PathClientSetExpiryOptions contains the optional parameters for the PathClient.SetExpiry method. +type PathClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PathClientUndeleteOptions contains the optional parameters for the PathClient.Undelete method. +type PathClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Only for hierarchical namespace enabled accounts. Optional. The path of the soft deleted blob to undelete. + UndeleteSource *string +} + +// PathClientUpdateOptions contains the optional parameters for the PathClient.Update method. +type PathClientUpdateOptions struct { + // Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. + // Each access control entry (ACE) consists of a scope, a type, a user or group + // identifier, and permissions in the format "[scope:][type]:[id]:[permissions]". + ACL *string + // Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled, + // a file changed event is raised. This event has a property indicating whether + // this is the final change to distinguish the difference between an intermediate flush to a file stream and the final close + // of a file stream. The close query parameter is valid only when the action is + // "flush" and change notifications are enabled. 
If the value of close is "true" and the flush operation completes successfully,
+ // the service raises a file change notification with a property indicating
+ // that this is the final update (the file stream has been closed). If "false" a change notification is raised indicating
+ // the file has changed. The default is false. This query parameter is set to true
+ // by the Hadoop ABFS driver to indicate that the file stream has been closed.
+ Close *bool
+ // Required for "Append Data" and "Flush Data". Must be 0 for "Flush Data". Must be the length of the request content in bytes
+ // for "Append Data".
+ ContentLength *int64
+ // Optional. The number of paths processed with each invocation is limited. If the number of paths to be processed exceeds
+ // this limit, a continuation token is returned in the response header
+ // x-ms-continuation. When a continuation token is returned in the response, it must be percent-encoded and specified in a
+ // subsequent invocation of setAccessControlRecursive operation.
+ Continuation *string
+ // Optional. Valid for "SetAccessControlRecursive" operation. If set to false, the operation will terminate quickly on encountering
+ // user errors (4XX). If true, the operation will ignore user errors and
+ // proceed with the operation on other sub-entities of the directory. Continuation token will only be returned when forceFlag
+ // is true in case of user errors. If not set the default value is false for
+ // this.
+ ForceFlag *bool
+ // Optional. The owning group of the blob or directory.
+ Group *string
+ // Optional. Valid for "SetAccessControlRecursive" operation. It specifies the maximum number of files or directories on which
+ // the acl change will be applied. If omitted or greater than 2,000, the
+ // request will process up to 2,000 items
+ MaxRecords *int32
+ // Optional. The owner of the blob or directory.
+ Owner *string
+ // Optional and only valid if Hierarchical Namespace is enabled for the account. 
Sets POSIX access permissions for the file + // owner, the file owning group, and others. Each class may be granted read, + // write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + // 0766) are supported. + Permissions *string + // This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file. + // It is required when uploading data to be appended to the file and when flushing + // previously uploaded data to the file. The value must be the position where the data is to be appended. Uploaded data is + // not immediately flushed, or written, to the file. To flush, the previously + // uploaded data must be contiguous, the position parameter must be specified and equal to the length of the file after all + // data has been written, and there must not be a request entity body included + // with the request. + Position *int64 + // Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and + // value pairs "n1=v1, n2=v2, …", where each value is a base64 encoded string. Note + // that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties + // not included in the list will be removed. All properties are removed if the + // header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then + // make a conditional request with the E-Tag and include values for all properties. + Properties *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Valid only for flush operations. If "true", uncommitted data is retained after the flush operation completes; otherwise, + // the uncommitted data is deleted after the flush operation. 
The default is
+ // false. Data at offsets less than the specified position are written to the file when flush succeeds, but this optional
+ // parameter allows data after the flush position to be retained for a future flush
+ // operation.
+ RetainUncommittedData *bool
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PathHTTPHeaders contains a group of parameters for the PathClient.Create method.
+type PathHTTPHeaders struct {
+ // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+ CacheControl *string
+ // Optional. Sets the blob's Content-Disposition header.
+ ContentDisposition *string
+ // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read
+ // request.
+ ContentEncoding *string
+ // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read
+ // request.
+ ContentLanguage *string
+ // Specify the transactional md5 for the body, to be validated by the service.
+ ContentMD5 []byte
+ // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+ ContentType *string
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentHash []byte
+}
+
+type PathList struct {
+ Paths []*Path
+}
+
+// ServiceClientListFileSystemsOptions contains the optional parameters for the ServiceClient.NewListFileSystemsPager method.
+type ServiceClientListFileSystemsOptions struct {
+ // Optional. A continuation token returned by a previous List FileSystems operation. When the number of filesystems to be
+ // returned exceeds the limit for a single invocation, a continuation token is returned in
+ // the response. When a continuation token is returned in the response, it must be specified in a subsequent invocation
+ // of the list operation to continue listing filesystems from where the previous call left off.
+ Continuation *string
+ // An optional value that specifies the maximum number of items to return. If omitted or greater than 5,000, the response
+ // will include up to 5,000 items.
+ MaxResults *int32
+ // Filters results to filesystems within the specified prefix.
+ Prefix *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+type SetAccessControlRecursiveResponse struct {
+ DirectoriesSuccessful *int32
+ FailedEntries []*ACLFailedEntry
+ FailureCount *int32
+ FilesSuccessful *int32
+}
+
+// SourceModifiedAccessConditions contains a group of parameters for the PathClient.Create method.
+type SourceModifiedAccessConditions struct {
+ // Specify an ETag value to operate only on blobs with a matching value.
+ SourceIfMatch *azcore.ETag
+ // Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ SourceIfModifiedSince *time.Time
+ // Specify an ETag value to operate only on blobs without a matching value.
+ SourceIfNoneMatch *azcore.ETag
+ // Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ SourceIfUnmodifiedSince *time.Time
+}
+
+type StorageError struct {
+ // The service error response object. 
+ Error *StorageErrorError +} + +// StorageErrorError - The service error response object. +type StorageErrorError struct { + // The service error code. + Code *string + + // The service error message. + Message *string +} diff --git a/sdk/storage/azdatalake/internal/generated/zz_models_serde.go b/sdk/storage/azdatalake/internal/generated/zz_models_serde.go new file mode 100644 index 000000000000..cb66df1e8173 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated/zz_models_serde.go @@ -0,0 +1,422 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" + "strconv" + "time" +) + +// MarshalJSON implements the json.Marshaller interface for type ACLFailedEntry. +func (a ACLFailedEntry) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "errorMessage", a.ErrorMessage) + populate(objectMap, "name", a.Name) + populate(objectMap, "type", a.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ACLFailedEntry. 
+func (a *ACLFailedEntry) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "errorMessage": + err = unpopulate(val, "ErrorMessage", &a.ErrorMessage) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &a.Name) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &a.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type PathHierarchyListSegment. +func (b PathHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias PathHierarchyListSegment + aux := &struct { + *alias + PathItems *[]*PathItemInternal `xml:"Blob"` + PathPrefixes *[]*PathPrefix `xml:"PathPrefix"` + }{ + alias: (*alias)(&b), + } + if b.PathItems != nil { + aux.PathItems = &b.PathItems + } + if b.PathPrefixes != nil { + aux.PathPrefixes = &b.PathPrefixes + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PathPropertiesInternal. 
+func (b PathPropertiesInternal) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias PathPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeleteTime *timeRFC1123 `xml:"DeleteTime"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&b), + AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), + CreationTime: (*timeRFC1123)(b.CreationTime), + DeleteTime: (*timeRFC1123)(b.DeleteTime), + DeletedTime: (*timeRFC1123)(b.DeletedTime), + ExpiresOn: (*timeRFC1123)(b.ExpiresOn), + LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), + LastModified: (*timeRFC1123)(b.LastModified), + } + if b.ContentMD5 != nil { + encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) + aux.ContentMD5 = &encodedContentMD5 + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type PathPropertiesInternal. 
+func (b *PathPropertiesInternal) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias PathPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeleteTime *timeRFC1123 `xml:"DeleteTime"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + if aux.ContentMD5 != nil { + if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { + return err + } + } + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + b.CreationTime = (*time.Time)(aux.CreationTime) + b.DeleteTime = (*time.Time)(aux.DeleteTime) + b.DeletedTime = (*time.Time)(aux.DeletedTime) + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + b.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FileSystem. +func (f FileSystem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "eTag", f.ETag) + populate(objectMap, "lastModified", f.LastModified) + populate(objectMap, "name", f.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FileSystem. 
func (f *FileSystem) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return fmt.Errorf("unmarshalling type %T: %v", f, err)
	}
	// Dispatch each raw field to its struct member; handled keys are removed
	// from the raw map as they are consumed. Unknown keys are ignored.
	for key, val := range rawMsg {
		var err error
		switch key {
		case "eTag":
			err = unpopulate(val, "ETag", &f.ETag)
			delete(rawMsg, key)
		case "lastModified":
			err = unpopulate(val, "LastModified", &f.LastModified)
			delete(rawMsg, key)
		case "name":
			err = unpopulate(val, "Name", &f.Name)
			delete(rawMsg, key)
		}
		if err != nil {
			return fmt.Errorf("unmarshalling type %T: %v", f, err)
		}
	}
	return nil
}

// MarshalJSON implements the json.Marshaller interface for type FileSystemList.
func (f FileSystemList) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]any)
	populate(objectMap, "filesystems", f.Filesystems)
	return json.Marshal(objectMap)
}

// UnmarshalJSON implements the json.Unmarshaller interface for type FileSystemList.
func (f *FileSystemList) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return fmt.Errorf("unmarshalling type %T: %v", f, err)
	}
	for key, val := range rawMsg {
		var err error
		switch key {
		case "filesystems":
			err = unpopulate(val, "Filesystems", &f.Filesystems)
			delete(rawMsg, key)
		}
		if err != nil {
			return fmt.Errorf("unmarshalling type %T: %v", f, err)
		}
	}
	return nil
}

// MarshalJSON implements the json.Marshaller interface for type Path.
+func (p Path) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentLength", p.ContentLength) + populate(objectMap, "creationTime", p.CreationTime) + populate(objectMap, "eTag", p.ETag) + populate(objectMap, "EncryptionScope", p.EncryptionScope) + populate(objectMap, "expiryTime", p.ExpiryTime) + populate(objectMap, "group", p.Group) + populate(objectMap, "isDirectory", p.IsDirectory) + populate(objectMap, "lastModified", p.LastModified) + populate(objectMap, "name", p.Name) + populate(objectMap, "owner", p.Owner) + populate(objectMap, "permissions", p.Permissions) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Path. +func (p *Path) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentLength": + var rawVal string + err = unpopulate(val, "ContentLength", &rawVal) + intVal, _ := strconv.ParseInt(rawVal, 10, 64) + p.ContentLength = &intVal + delete(rawMsg, key) + case "creationTime": + err = unpopulate(val, "CreationTime", &p.CreationTime) + delete(rawMsg, key) + case "eTag": + err = unpopulate(val, "ETag", &p.ETag) + delete(rawMsg, key) + case "EncryptionScope": + err = unpopulate(val, "EncryptionScope", &p.EncryptionScope) + delete(rawMsg, key) + case "expiryTime": + err = unpopulate(val, "ExpiryTime", &p.ExpiryTime) + delete(rawMsg, key) + case "group": + err = unpopulate(val, "Group", &p.Group) + delete(rawMsg, key) + case "isDirectory": + var rawVal string + err = unpopulate(val, "IsDirectory", &rawVal) + boolVal, _ := strconv.ParseBool(rawVal) + p.IsDirectory = &boolVal + delete(rawMsg, key) + case "lastModified": + err = unpopulate(val, "LastModified", &p.LastModified) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", 
&p.Name) + delete(rawMsg, key) + case "owner": + err = unpopulate(val, "Owner", &p.Owner) + delete(rawMsg, key) + case "permissions": + err = unpopulate(val, "Permissions", &p.Permissions) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PathList. +func (p PathList) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "paths", p.Paths) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PathList. +func (p *PathList) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "paths": + err = unpopulate(val, "Paths", &p.Paths) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SetAccessControlRecursiveResponse. +func (s SetAccessControlRecursiveResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "directoriesSuccessful", s.DirectoriesSuccessful) + populate(objectMap, "failedEntries", s.FailedEntries) + populate(objectMap, "failureCount", s.FailureCount) + populate(objectMap, "filesSuccessful", s.FilesSuccessful) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SetAccessControlRecursiveResponse. 
func (s *SetAccessControlRecursiveResponse) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return fmt.Errorf("unmarshalling type %T: %v", s, err)
	}
	// Dispatch each raw field to its struct member; handled keys are removed
	// from the raw map as they are consumed. Unknown keys are ignored.
	for key, val := range rawMsg {
		var err error
		switch key {
		case "directoriesSuccessful":
			err = unpopulate(val, "DirectoriesSuccessful", &s.DirectoriesSuccessful)
			delete(rawMsg, key)
		case "failedEntries":
			err = unpopulate(val, "FailedEntries", &s.FailedEntries)
			delete(rawMsg, key)
		case "failureCount":
			err = unpopulate(val, "FailureCount", &s.FailureCount)
			delete(rawMsg, key)
		case "filesSuccessful":
			err = unpopulate(val, "FilesSuccessful", &s.FilesSuccessful)
			delete(rawMsg, key)
		}
		if err != nil {
			return fmt.Errorf("unmarshalling type %T: %v", s, err)
		}
	}
	return nil
}

// MarshalJSON implements the json.Marshaller interface for type StorageError.
func (s StorageError) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]any)
	populate(objectMap, "error", s.Error)
	return json.Marshal(objectMap)
}

// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError.
func (s *StorageError) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return fmt.Errorf("unmarshalling type %T: %v", s, err)
	}
	for key, val := range rawMsg {
		var err error
		switch key {
		case "error":
			err = unpopulate(val, "Error", &s.Error)
			delete(rawMsg, key)
		}
		if err != nil {
			return fmt.Errorf("unmarshalling type %T: %v", s, err)
		}
	}
	return nil
}

// MarshalJSON implements the json.Marshaller interface for type StorageErrorError.
func (s StorageErrorError) MarshalJSON() ([]byte, error) {
	// NOTE(review): "Code" and "Message" are Pascal-cased on the wire per the
	// generated contract, unlike most other camelCase keys in this file.
	objectMap := make(map[string]any)
	populate(objectMap, "Code", s.Code)
	populate(objectMap, "Message", s.Message)
	return json.Marshal(objectMap)
}

// UnmarshalJSON implements the json.Unmarshaller interface for type StorageErrorError.
func (s *StorageErrorError) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return fmt.Errorf("unmarshalling type %T: %v", s, err)
	}
	for key, val := range rawMsg {
		var err error
		switch key {
		case "Code":
			err = unpopulate(val, "Code", &s.Code)
			delete(rawMsg, key)
		case "Message":
			err = unpopulate(val, "Message", &s.Message)
			delete(rawMsg, key)
		}
		if err != nil {
			return fmt.Errorf("unmarshalling type %T: %v", s, err)
		}
	}
	return nil
}

// populate adds k/v to the map for JSON marshalling. A Go nil is skipped
// (field omitted), an azcore null sentinel is emitted as an explicit JSON
// null, and any other non-nil pointer/reference value is included as-is.
func populate(m map[string]any, k string, v any) {
	if v == nil {
		return
	} else if azcore.IsNullValue(v) {
		m[k] = nil
	} else if !reflect.ValueOf(v).IsNil() {
		m[k] = v
	}
}

// unpopulate unmarshals data into v; fn is the struct field name used to
// contextualize any error. A nil payload is a no-op.
func unpopulate(data json.RawMessage, fn string, v any) error {
	if data == nil {
		return nil
	}
	if err := json.Unmarshal(data, v); err != nil {
		return fmt.Errorf("struct field %s: %v", fn, err)
	}
	return nil
}
diff --git a/sdk/storage/azdatalake/internal/generated/zz_path_client.go b/sdk/storage/azdatalake/internal/generated/zz_path_client.go
new file mode 100644
index 000000000000..f0547cb91e1a
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated/zz_path_client.go
@@ -0,0 +1,1546 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package generated

import (
	"context"
	"encoding/base64"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"io"
	"net/http"
	"strconv"
	"time"
)

// PathClient contains the methods for the Path group.
// Don't use this type directly, use a constructor function instead.
type PathClient struct {
	// internal is the azcore client whose pipeline sends every request.
	internal *azcore.Client
	// endpoint is the full URL of the path resource targeted by requests.
	endpoint string
	// xmsLeaseDuration is not referenced by the operations visible in this
	// chunk; presumably threaded in by the constructor — TODO confirm.
	xmsLeaseDuration int32
}

// AppendData - Append data to the file.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - body - Initial data
// - options - PathClientAppendDataOptions contains the optional parameters for the PathClient.AppendData method.
// - PathHTTPHeaders - PathHTTPHeaders contains a group of parameters for the PathClient.Create method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method.
// - CPKInfo - CPKInfo contains a group of parameters for the PathClient.Create method.
func (client *PathClient) AppendData(ctx context.Context, body io.ReadSeekCloser, options *PathClientAppendDataOptions, pathHTTPHeaders *PathHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo) (PathClientAppendDataResponse, error) {
	req, err := client.appendDataCreateRequest(ctx, body, options, pathHTTPHeaders, leaseAccessConditions, cpkInfo)
	if err != nil {
		return PathClientAppendDataResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return PathClientAppendDataResponse{}, err
	}
	// The service signals a successful append with 202 Accepted; anything
	// else is surfaced as a ResponseError.
	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
		return PathClientAppendDataResponse{}, runtime.NewResponseError(resp)
	}
	return client.appendDataHandleResponse(resp)
}

// appendDataCreateRequest creates the AppendData request.
func (client *PathClient) appendDataCreateRequest(ctx context.Context, body io.ReadSeekCloser, options *PathClientAppendDataOptions, pathHTTPHeaders *PathHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo) (*policy.Request, error) {
	// Append is a PATCH against the path endpoint with action=append.
	req, err := runtime.NewRequest(ctx, http.MethodPatch, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query parameters.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("action", "append")
	if options != nil && options.Position != nil {
		reqQP.Set("position", strconv.FormatInt(*options.Position, 10))
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Request headers; optional values are only set when supplied.
	if options != nil && options.ContentLength != nil {
		req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(*options.ContentLength, 10)}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.TransactionalContentHash != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(pathHTTPHeaders.TransactionalContentHash)}
	}
	if options != nil && options.TransactionalContentCRC64 != nil {
		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	// Customer-provided key (CPK) encryption headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	req.Raw().Header["Accept"] = []string{"application/json"}
	if err := req.SetBody(body, "application/json"); err != nil {
		return nil, err
	}
	return req, nil
}

// appendDataHandleResponse handles the AppendData response.
func (client *PathClient) appendDataHandleResponse(resp *http.Response) (PathClientAppendDataResponse, error) {
	// Copy each response header of interest into the typed response struct;
	// absent headers leave the corresponding field nil.
	result := PathClientAppendDataResponse{}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PathClientAppendDataResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return PathClientAppendDataResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		contentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return PathClientAppendDataResponse{}, err
		}
		result.ContentCRC64 = contentCRC64
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return PathClientAppendDataResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	return result, nil
}

// Create - Create or rename a file or directory. By default, the destination is overwritten and if the destination already
// exists and has a lease the lease is broken. This operation supports conditional HTTP
// requests. For more information, see Specifying Conditional Headers for Blob Service Operations
// [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
// To fail if the destination already exists, use a conditional request with
// If-None-Match: "*".
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - PathClientCreateOptions contains the optional parameters for the PathClient.Create method.
// - PathHTTPHeaders - PathHTTPHeaders contains a group of parameters for the PathClient.Create method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties
// method.
// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the PathClient.Create
// method.
// - CPKInfo - CPKInfo contains a group of parameters for the PathClient.Create method.
func (client *PathClient) Create(ctx context.Context, options *PathClientCreateOptions, pathHTTPHeaders *PathHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, cpkInfo *CPKInfo) (PathClientCreateResponse, error) {
	req, err := client.createCreateRequest(ctx, options, pathHTTPHeaders, leaseAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions, cpkInfo)
	if err != nil {
		return PathClientCreateResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return PathClientCreateResponse{}, err
	}
	// The service signals a successful create with 201 Created.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return PathClientCreateResponse{}, runtime.NewResponseError(resp)
	}
	return client.createHandleResponse(resp)
}

// createCreateRequest creates the Create request.
func (client *PathClient) createCreateRequest(ctx context.Context, options *PathClientCreateOptions, pathHTTPHeaders *PathHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, cpkInfo *CPKInfo) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query parameters.
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	if options != nil && options.Resource != nil {
		reqQP.Set("resource", string(*options.Resource))
	}
	if options != nil && options.Continuation != nil {
		reqQP.Set("continuation", *options.Continuation)
	}
	if options != nil && options.Mode != nil {
		reqQP.Set("mode", string(*options.Mode))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Request headers; optional values are only set when supplied.
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if pathHTTPHeaders != nil && pathHTTPHeaders.CacheControl != nil {
		req.Raw().Header["x-ms-cache-control"] = []string{*pathHTTPHeaders.CacheControl}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentEncoding != nil {
		req.Raw().Header["x-ms-content-encoding"] = []string{*pathHTTPHeaders.ContentEncoding}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentLanguage != nil {
		req.Raw().Header["x-ms-content-language"] = []string{*pathHTTPHeaders.ContentLanguage}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentDisposition != nil {
		req.Raw().Header["x-ms-content-disposition"] = []string{*pathHTTPHeaders.ContentDisposition}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentType != nil {
		req.Raw().Header["x-ms-content-type"] = []string{*pathHTTPHeaders.ContentType}
	}
	if options != nil && options.RenameSource != nil {
		req.Raw().Header["x-ms-rename-source"] = []string{*options.RenameSource}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if options != nil && options.SourceLeaseID != nil {
		req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID}
	}
	if options != nil && options.Properties != nil {
		req.Raw().Header["x-ms-properties"] = []string{*options.Properties}
	}
	if options != nil && options.Permissions != nil {
		req.Raw().Header["x-ms-permissions"] = []string{*options.Permissions}
	}
	if options != nil && options.Umask != nil {
		req.Raw().Header["x-ms-umask"] = []string{*options.Umask}
	}
	// Conditional request headers on the destination.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	// Conditional request headers on the rename source.
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
		req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
	}
	// Customer-provided key (CPK) encryption headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	// POSIX ownership, ACL, lease and expiry options.
	if options != nil && options.Owner != nil {
		req.Raw().Header["x-ms-owner"] = []string{*options.Owner}
	}
	if options != nil && options.Group != nil {
		req.Raw().Header["x-ms-group"] = []string{*options.Group}
	}
	if options != nil && options.ACL != nil {
		req.Raw().Header["x-ms-acl"] = []string{*options.ACL}
	}
	if options != nil && options.ProposedLeaseID != nil {
		req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
	}
	if options != nil && options.LeaseDuration != nil {
		req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(*options.LeaseDuration, 10)}
	}
	if options != nil && options.ExpiryOptions != nil {
		req.Raw().Header["x-ms-expiry-option"] = []string{string(*options.ExpiryOptions)}
	}
	if options != nil && options.ExpiresOn != nil {
		req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn}
	}
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}

// createHandleResponse handles the Create response.
func (client *PathClient) createHandleResponse(resp *http.Response) (PathClientCreateResponse, error) {
	// Copy each response header of interest into the typed response struct;
	// absent headers leave the corresponding field nil.
	result := PathClientCreateResponse{}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PathClientCreateResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PathClientCreateResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-continuation"); val != "" {
		result.Continuation = &val
	}
	if val := resp.Header.Get("Content-Length"); val != "" {
		contentLength, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return PathClientCreateResponse{}, err
		}
		result.ContentLength = &contentLength
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return PathClientCreateResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	return result, nil
}

// Delete - Delete the file or directory. This operation supports conditional HTTP requests. For more information, see Specifying
// Conditional Headers for Blob Service Operations
// [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - PathClientDeleteOptions contains the optional parameters for the PathClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties
// method.
func (client *PathClient) Delete(ctx context.Context, options *PathClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PathClientDeleteResponse, error) {
	req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
	if err != nil {
		return PathClientDeleteResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return PathClientDeleteResponse{}, err
	}
	// The service signals a successful delete with 200 OK.
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return PathClientDeleteResponse{}, runtime.NewResponseError(resp)
	}
	return client.deleteHandleResponse(resp)
}

// deleteCreateRequest creates the Delete request.
func (client *PathClient) deleteCreateRequest(ctx context.Context, options *PathClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query parameters; "recursive" controls directory subtree deletion and
	// "continuation" resumes a previously truncated delete.
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	if options != nil && options.Recursive != nil {
		reqQP.Set("recursive", strconv.FormatBool(*options.Recursive))
	}
	if options != nil && options.Continuation != nil {
		reqQP.Set("continuation", *options.Continuation)
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Request headers; optional values are only set when supplied.
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}

// deleteHandleResponse handles the Delete response.
func (client *PathClient) deleteHandleResponse(resp *http.Response) (PathClientDeleteResponse, error) {
	// Copy each response header of interest into the typed response struct;
	// absent headers leave the corresponding field nil.
	result := PathClientDeleteResponse{}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PathClientDeleteResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-continuation"); val != "" {
		result.Continuation = &val
	}
	if val := resp.Header.Get("x-ms-deletion-id"); val != "" {
		result.DeletionID = &val
	}
	return result, nil
}

// FlushData - Flushes, i.e. writes, previously appended data to the file.
// NOTE(review): the generated summary previously read "Set the owner, group,
// permissions, or access control list for a path", which describes the access
// control operation, not this one — the request built for this method issues
// the "flush" action.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - PathClientFlushDataOptions contains the optional parameters for the PathClient.FlushData method.
// - PathHTTPHeaders - PathHTTPHeaders contains a group of parameters for the PathClient.Create method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties
// method.
// - CPKInfo - CPKInfo contains a group of parameters for the PathClient.Create method.
func (client *PathClient) FlushData(ctx context.Context, options *PathClientFlushDataOptions, pathHTTPHeaders *PathHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, cpkInfo *CPKInfo) (PathClientFlushDataResponse, error) {
	req, err := client.flushDataCreateRequest(ctx, options, pathHTTPHeaders, leaseAccessConditions, modifiedAccessConditions, cpkInfo)
	if err != nil {
		return PathClientFlushDataResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return PathClientFlushDataResponse{}, err
	}
	// The service signals a successful flush with 200 OK.
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return PathClientFlushDataResponse{}, runtime.NewResponseError(resp)
	}
	return client.flushDataHandleResponse(resp)
}

// flushDataCreateRequest creates the FlushData request.
func (client *PathClient) flushDataCreateRequest(ctx context.Context, options *PathClientFlushDataOptions, pathHTTPHeaders *PathHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, cpkInfo *CPKInfo) (*policy.Request, error) {
	// Flush is a PATCH against the path endpoint with action=flush.
	req, err := runtime.NewRequest(ctx, http.MethodPatch, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query parameters.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("action", "flush")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	if options != nil && options.Position != nil {
		reqQP.Set("position", strconv.FormatInt(*options.Position, 10))
	}
	if options != nil && options.RetainUncommittedData != nil {
		reqQP.Set("retainUncommittedData", strconv.FormatBool(*options.RetainUncommittedData))
	}
	if options != nil && options.Close != nil {
		reqQP.Set("close", strconv.FormatBool(*options.Close))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Request headers; optional values are only set when supplied.
	if options != nil && options.ContentLength != nil {
		req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(*options.ContentLength, 10)}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentMD5 != nil {
		req.Raw().Header["x-ms-content-md5"] = []string{base64.StdEncoding.EncodeToString(pathHTTPHeaders.ContentMD5)}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.CacheControl != nil {
		req.Raw().Header["x-ms-cache-control"] = []string{*pathHTTPHeaders.CacheControl}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentType != nil {
		req.Raw().Header["x-ms-content-type"] = []string{*pathHTTPHeaders.ContentType}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentDisposition != nil {
		req.Raw().Header["x-ms-content-disposition"] = []string{*pathHTTPHeaders.ContentDisposition}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentEncoding != nil {
		req.Raw().Header["x-ms-content-encoding"] = []string{*pathHTTPHeaders.ContentEncoding}
	}
	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentLanguage != nil {
		req.Raw().Header["x-ms-content-language"] = []string{*pathHTTPHeaders.ContentLanguage}
	}
	// Conditional request headers.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	// Customer-provided key (CPK) encryption headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}

// flushDataHandleResponse handles the FlushData response.
func (client *PathClient) flushDataHandleResponse(resp *http.Response) (PathClientFlushDataResponse, error) {
	// Copy each response header of interest into the typed response struct;
	// absent headers leave the corresponding field nil.
	result := PathClientFlushDataResponse{}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PathClientFlushDataResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PathClientFlushDataResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("Content-Length"); val != "" {
		contentLength, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return PathClientFlushDataResponse{}, err
		}
		result.ContentLength = &contentLength
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return PathClientFlushDataResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	return result, nil
}

// GetProperties - Get Properties returns all system and user defined properties for a path. Get Status returns all system
// defined properties for a path. Get Access Control List returns the access control list for a
// path. This operation supports conditional HTTP requests. For more information, see Specifying Conditional Headers for Blob
// Service Operations
// [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - PathClientGetPropertiesOptions contains the optional parameters for the PathClient.GetProperties method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties
// method.
+func (client *PathClient) GetProperties(ctx context.Context, options *PathClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PathClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return PathClientGetPropertiesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PathClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PathClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *PathClient) getPropertiesCreateRequest(ctx context.Context, options *PathClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Action != nil { + reqQP.Set("action", string(*options.Action)) + } + if options != nil && options.Upn != nil { + reqQP.Set("upn", strconv.FormatBool(*options.Upn)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + 
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *PathClient) getPropertiesHandleResponse(resp *http.Response) (PathClientGetPropertiesResponse, error) { + result := PathClientGetPropertiesResponse{} + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PathClientGetPropertiesResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + result.ContentMD5 = &val + } + if val := 
resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-resource-type"); val != "" { + result.ResourceType = &val + } + if val := resp.Header.Get("x-ms-properties"); val != "" { + result.Properties = &val + } + if val := resp.Header.Get("x-ms-owner"); val != "" { + result.Owner = &val + } + if val := resp.Header.Get("x-ms-group"); val != "" { + result.Group = &val + } + if val := resp.Header.Get("x-ms-permissions"); val != "" { + result.Permissions = &val + } + if val := resp.Header.Get("x-ms-acl"); val != "" { + result.ACL = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = &val + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = &val + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = &val + } + return result, nil +} + +// Lease - Create and manage a lease to restrict write and delete access to the path. This operation supports conditional +// HTTP requests. For more information, see Specifying Conditional Headers for Blob Service +// Operations [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations]. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2020-10-02 +// - xmsLeaseAction - There are five lease actions: "acquire", "break", "change", "renew", and "release". Use "acquire" and +// specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a new lease. Use "break" +// to break an existing lease. When a lease is broken, the lease break period is allowed to elapse, during which time no lease +// operation except break and release can be performed on the file. When a +// lease is successfully broken, the response indicates the interval in seconds until a new lease can be acquired. Use "change" +// and specify the current lease ID in "x-ms-lease-id" and the new lease ID in +// "x-ms-proposed-lease-id" to change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew +// an existing lease. Use "release" and specify the "x-ms-lease-id" to release a +// lease. +// - options - PathClientLeaseOptions contains the optional parameters for the PathClient.Lease method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties +// method. 
+func (client *PathClient) Lease(ctx context.Context, xmsLeaseAction PathLeaseAction, options *PathClientLeaseOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PathClientLeaseResponse, error) { + req, err := client.leaseCreateRequest(ctx, xmsLeaseAction, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return PathClientLeaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PathClientLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + return PathClientLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.leaseHandleResponse(resp) +} + +// leaseCreateRequest creates the Lease request. +func (client *PathClient) leaseCreateRequest(ctx context.Context, xmsLeaseAction PathLeaseAction, options *PathClientLeaseOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-lease-action"] = []string{string(xmsLeaseAction)} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(client.xmsLeaseDuration), 10)} + if options != nil && options.XMSLeaseBreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.XMSLeaseBreakPeriod), 10)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil 
{ + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// leaseHandleResponse handles the Lease response. 
+func (client *PathClient) leaseHandleResponse(resp *http.Response) (PathClientLeaseResponse, error) { + result := PathClientLeaseResponse{} + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + result.LeaseTime = &val + } + return result, nil +} + +// Read - Read the contents of a file. For read operations, range requests are supported. This operation supports conditional +// HTTP requests. For more information, see Specifying Conditional Headers for Blob +// Service Operations [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations]. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - PathClientReadOptions contains the optional parameters for the PathClient.Read method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the PathClient.Create method. 
+func (client *PathClient) Read(ctx context.Context, options *PathClientReadOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, cpkInfo *CPKInfo) (PathClientReadResponse, error) { + req, err := client.readCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, cpkInfo) + if err != nil { + return PathClientReadResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PathClientReadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { + return PathClientReadResponse{}, runtime.NewResponseError(resp) + } + return client.readHandleResponse(resp) +} + +// readCreateRequest creates the Read request. +func (client *PathClient) readCreateRequest(ctx context.Context, options *PathClientReadOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, cpkInfo *CPKInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.Range != nil { + req.Raw().Header["Range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.XMSRangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.XMSRangeGetContentMD5)} + } + if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// readHandleResponse handles the Read response. 
+func (client *PathClient) readHandleResponse(resp *http.Response) (PathClientReadResponse, error) { + result := PathClientReadResponse{Body: resp.Body} + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PathClientReadResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + result.ContentMD5 = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientReadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientReadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-resource-type"); val != "" { + result.ResourceType = &val + } + if val := resp.Header.Get("x-ms-properties"); val != "" { + result.Properties = &val + } + if val := 
resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = &val + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = &val + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PathClientReadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-content-md5"); val != "" { + result.XMSContentMD5 = &val + } + return result, nil +} + +// SetAccessControl - Set the owner, group, permissions, or access control list for a path. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - PathClientSetAccessControlOptions contains the optional parameters for the PathClient.SetAccessControl method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties +// method. 
+func (client *PathClient) SetAccessControl(ctx context.Context, options *PathClientSetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PathClientSetAccessControlResponse, error) { + req, err := client.setAccessControlCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return PathClientSetAccessControlResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PathClientSetAccessControlResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PathClientSetAccessControlResponse{}, runtime.NewResponseError(resp) + } + return client.setAccessControlHandleResponse(resp) +} + +// setAccessControlCreateRequest creates the SetAccessControl request. +func (client *PathClient) setAccessControlCreateRequest(ctx context.Context, options *PathClientSetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPatch, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("action", "setAccessControl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Owner != nil { + req.Raw().Header["x-ms-owner"] = []string{*options.Owner} + } + if options != nil && options.Group != nil { + req.Raw().Header["x-ms-group"] = []string{*options.Group} + } + if options != nil && options.Permissions != nil { + req.Raw().Header["x-ms-permissions"] = []string{*options.Permissions} + } + if options != nil && options.ACL != nil { + 
req.Raw().Header["x-ms-acl"] = []string{*options.ACL} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// setAccessControlHandleResponse handles the SetAccessControl response. 
+func (client *PathClient) setAccessControlHandleResponse(resp *http.Response) (PathClientSetAccessControlResponse, error) { + result := PathClientSetAccessControlResponse{} + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientSetAccessControlResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientSetAccessControlResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetAccessControlRecursive - Set the access control list for a path and sub-paths. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - mode - Mode "set" sets POSIX access control rights on files and directories, "modify" modifies one or more POSIX access +// control rights that pre-exist on files and directories, "remove" removes one or more +// POSIX access control rights that were present earlier on files and directories +// - options - PathClientSetAccessControlRecursiveOptions contains the optional parameters for the PathClient.SetAccessControlRecursive +// method. 
+func (client *PathClient) SetAccessControlRecursive(ctx context.Context, mode PathSetAccessControlRecursiveMode, options *PathClientSetAccessControlRecursiveOptions) (PathClientSetAccessControlRecursiveResponse, error) { + req, err := client.SetAccessControlRecursiveCreateRequest(ctx, mode, options) + if err != nil { + return PathClientSetAccessControlRecursiveResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PathClientSetAccessControlRecursiveResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PathClientSetAccessControlRecursiveResponse{}, runtime.NewResponseError(resp) + } + return client.SetAccessControlRecursiveHandleResponse(resp) +} + +// SetAccessControlRecursiveCreateRequest creates the SetAccessControlRecursive request. +func (client *PathClient) SetAccessControlRecursiveCreateRequest(ctx context.Context, mode PathSetAccessControlRecursiveMode, options *PathClientSetAccessControlRecursiveOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPatch, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("action", "setAccessControlRecursive") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Continuation != nil { + reqQP.Set("continuation", *options.Continuation) + } + reqQP.Set("mode", string(mode)) + if options != nil && options.ForceFlag != nil { + reqQP.Set("forceFlag", strconv.FormatBool(*options.ForceFlag)) + } + if options != nil && options.MaxRecords != nil { + reqQP.Set("maxRecords", strconv.FormatInt(int64(*options.MaxRecords), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.ACL != nil { + req.Raw().Header["x-ms-acl"] = []string{*options.ACL} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = 
[]string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// SetAccessControlRecursiveHandleResponse handles the SetAccessControlRecursive response. +func (client *PathClient) SetAccessControlRecursiveHandleResponse(resp *http.Response) (PathClientSetAccessControlRecursiveResponse, error) { + result := PathClientSetAccessControlRecursiveResponse{} + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientSetAccessControlRecursiveResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-continuation"); val != "" { + result.Continuation = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsJSON(resp, &result.SetAccessControlRecursiveResponse); err != nil { + return PathClientSetAccessControlRecursiveResponse{}, err + } + return result, nil +} + +// SetExpiry - Sets the time a blob will expire and be deleted. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - expiryOptions - Required. Indicates mode of the expiry time +// - options - PathClientSetExpiryOptions contains the optional parameters for the PathClient.SetExpiry method. 
+func (client *PathClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *PathClientSetExpiryOptions) (PathClientSetExpiryResponse, error) { + req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) + if err != nil { + return PathClientSetExpiryResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PathClientSetExpiryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PathClientSetExpiryResponse{}, runtime.NewResponseError(resp) + } + return client.setExpiryHandleResponse(resp) +} + +// setExpiryCreateRequest creates the SetExpiry request. +func (client *PathClient) setExpiryCreateRequest(ctx context.Context, expiryOptions ExpiryOptions, options *PathClientSetExpiryOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "expiry") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-expiry-option"] = []string{string(expiryOptions)} + if options != nil && options.ExpiresOn != nil { + req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} + } + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// setExpiryHandleResponse handles the SetExpiry response. 
+func (client *PathClient) setExpiryHandleResponse(resp *http.Response) (PathClientSetExpiryResponse, error) {
+	result := PathClientSetExpiryResponse{}
+	// Copy optional response headers; a field stays nil when its header is absent.
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		// HTTP date headers use RFC 1123 format.
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PathClientSetExpiryResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PathClientSetExpiryResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// Undelete - Undelete a path that was previously soft deleted
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - options - PathClientUndeleteOptions contains the optional parameters for the PathClient.Undelete method.
+func (client *PathClient) Undelete(ctx context.Context, options *PathClientUndeleteOptions) (PathClientUndeleteResponse, error) {
+	req, err := client.undeleteCreateRequest(ctx, options)
+	if err != nil {
+		return PathClientUndeleteResponse{}, err
+	}
+	resp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return PathClientUndeleteResponse{}, err
+	}
+	// Undelete succeeds only with 200 OK.
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return PathClientUndeleteResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.undeleteHandleResponse(resp)
+}
+
+// undeleteCreateRequest creates the Undelete request.
+func (client *PathClient) undeleteCreateRequest(ctx context.Context, options *PathClientUndeleteOptions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	// "comp=undelete" routes this PUT to the Undelete operation.
+	reqQP.Set("comp", "undelete")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if options != nil && options.UndeleteSource != nil {
+		req.Raw().Header["x-ms-undelete-source"] = []string{*options.UndeleteSource}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// undeleteHandleResponse handles the Undelete response.
+func (client *PathClient) undeleteHandleResponse(resp *http.Response) (PathClientUndeleteResponse, error) {
+	result := PathClientUndeleteResponse{}
+	// Copy optional response headers; a field stays nil when its header is absent.
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-resource-type"); val != "" {
+		result.ResourceType = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PathClientUndeleteResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// Update - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, sets properties for
+// a file or directory, or sets access control for a file or directory. Data can only be
+// appended to a file. Concurrent writes to the same file using multiple clients are not supported. This operation supports
+// conditional HTTP requests. For more information, see Specifying Conditional
+// Headers for Blob Service Operations [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - action - The action must be "append" to upload data to be appended to a file, "flush" to flush previously uploaded data
+// to a file, "setProperties" to set the properties of a file or directory,
+// "setAccessControl" to set the owner, group, permissions, or access control list for a file or directory, or "setAccessControlRecursive"
+// to set the access control list for a directory recursively. Note
+// that Hierarchical Namespace must be enabled for the account in order to use access control. Also note that the Access Control
+// List (ACL) includes permissions for the owner, owning group, and others,
+// so the x-ms-permissions and x-ms-acl request headers are mutually exclusive.
+// - mode - Mode "set" sets POSIX access control rights on files and directories, "modify" modifies one or more POSIX access
+// control rights that pre-exist on files and directories, "remove" removes one or more
+// POSIX access control rights that were present earlier on files and directories
+// - body - Initial data
+// - options - PathClientUpdateOptions contains the optional parameters for the PathClient.Update method.
+// - PathHTTPHeaders - PathHTTPHeaders contains a group of parameters for the PathClient.Create method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the PathClient.Create method.
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the FileSystemClient.SetProperties
+// method.
+func (client *PathClient) Update(ctx context.Context, action PathUpdateAction, mode PathSetAccessControlRecursiveMode, body io.ReadSeekCloser, options *PathClientUpdateOptions, pathHTTPHeaders *PathHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PathClientUpdateResponse, error) {
+	req, err := client.updateCreateRequest(ctx, action, mode, body, options, pathHTTPHeaders, leaseAccessConditions, modifiedAccessConditions)
+	if err != nil {
+		return PathClientUpdateResponse{}, err
+	}
+	resp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return PathClientUpdateResponse{}, err
+	}
+	// Both 200 OK and 202 Accepted are success statuses for Update.
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return PathClientUpdateResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.updateHandleResponse(resp)
+}
+
+// updateCreateRequest creates the Update request.
+// Update is a PATCH against the path; the action/mode query parameters select the sub-operation.
+func (client *PathClient) updateCreateRequest(ctx context.Context, action PathUpdateAction, mode PathSetAccessControlRecursiveMode, body io.ReadSeekCloser, options *PathClientUpdateOptions, pathHTTPHeaders *PathHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPatch, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	reqQP.Set("action", string(action))
+	if options != nil && options.MaxRecords != nil {
+		reqQP.Set("maxRecords", strconv.FormatInt(int64(*options.MaxRecords), 10))
+	}
+	if options != nil && options.Continuation != nil {
+		reqQP.Set("continuation", *options.Continuation)
+	}
+	reqQP.Set("mode", string(mode))
+	if options != nil && options.ForceFlag != nil {
+		reqQP.Set("forceFlag", strconv.FormatBool(*options.ForceFlag))
+	}
+	if options != nil && options.Position != nil {
+		reqQP.Set("position", strconv.FormatInt(*options.Position, 10))
+	}
+	if options != nil && options.RetainUncommittedData != nil {
+		reqQP.Set("retainUncommittedData", strconv.FormatBool(*options.RetainUncommittedData))
+	}
+	if options != nil && options.Close != nil {
+		reqQP.Set("close", strconv.FormatBool(*options.Close))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.ContentLength != nil {
+		req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(*options.ContentLength, 10)}
+	}
+	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentMD5 != nil {
+		req.Raw().Header["x-ms-content-md5"] = []string{base64.StdEncoding.EncodeToString(pathHTTPHeaders.ContentMD5)}
+	}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	if pathHTTPHeaders != nil && pathHTTPHeaders.CacheControl != nil {
+		req.Raw().Header["x-ms-cache-control"] = []string{*pathHTTPHeaders.CacheControl}
+	}
+	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentType != nil {
+		req.Raw().Header["x-ms-content-type"] = []string{*pathHTTPHeaders.ContentType}
+	}
+	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentDisposition != nil {
+		req.Raw().Header["x-ms-content-disposition"] = []string{*pathHTTPHeaders.ContentDisposition}
+	}
+	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentEncoding != nil {
+		req.Raw().Header["x-ms-content-encoding"] = []string{*pathHTTPHeaders.ContentEncoding}
+	}
+	if pathHTTPHeaders != nil && pathHTTPHeaders.ContentLanguage != nil {
+		req.Raw().Header["x-ms-content-language"] = []string{*pathHTTPHeaders.ContentLanguage}
+	}
+	if options != nil && options.Properties != nil {
+		req.Raw().Header["x-ms-properties"] = []string{*options.Properties}
+	}
+	if options != nil && options.Owner != nil {
+		req.Raw().Header["x-ms-owner"] = []string{*options.Owner}
+	}
+	if options != nil && options.Group != nil {
+		req.Raw().Header["x-ms-group"] = []string{*options.Group}
+	}
+	if options != nil && options.Permissions != nil {
+		req.Raw().Header["x-ms-permissions"] = []string{*options.Permissions}
+	}
+	if options != nil && options.ACL != nil {
+		req.Raw().Header["x-ms-acl"] = []string{*options.ACL}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
+	}
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	// The request body (e.g. data for the "append" action) is always sent as application/octet-stream.
+	if err := req.SetBody(body, "application/octet-stream"); err != nil {
+		return nil, err
+	}
+	return req, nil
+}
+
+// updateHandleResponse handles the Update response.
+func (client *PathClient) updateHandleResponse(resp *http.Response) (PathClientUpdateResponse, error) { + result := PathClientUpdateResponse{} + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientUpdateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PathClientUpdateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PathClientUpdateResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + result.ContentMD5 = &val + } + if val := resp.Header.Get("x-ms-properties"); val != "" { + result.Properties = &val + } + if val := resp.Header.Get("x-ms-continuation"); val != "" { + result.XMSContinuation = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := 
runtime.UnmarshalAsJSON(resp, &result.SetAccessControlRecursiveResponse); err != nil { + return PathClientUpdateResponse{}, err + } + return result, nil +} diff --git a/sdk/storage/azdatalake/internal/generated/zz_response_types.go b/sdk/storage/azdatalake/internal/generated/zz_response_types.go new file mode 100644 index 000000000000..58112b532f0c --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated/zz_response_types.go @@ -0,0 +1,558 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" + "time" +) + +// FileSystemClientCreateResponse contains the response from method FileSystemClient.Create. +type FileSystemClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // NamespaceEnabled contains the information returned from the x-ms-namespace-enabled header response. + NamespaceEnabled *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileSystemClientDeleteResponse contains the response from method FileSystemClient.Delete. +type FileSystemClientDeleteResponse struct { + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileSystemClientGetPropertiesResponse contains the response from method FileSystemClient.GetProperties. +type FileSystemClientGetPropertiesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // NamespaceEnabled contains the information returned from the x-ms-namespace-enabled header response. + NamespaceEnabled *string + + // Properties contains the information returned from the x-ms-properties header response. + Properties *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileSystemClientListPathHierarchySegmentResponse contains the response from method FileSystemClient.NewListBlobHierarchySegmentPager. +type FileSystemClientListPathHierarchySegmentResponse struct { + ListPathsHierarchySegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// FileSystemClientListPathsResponse contains the response from method FileSystemClient.NewListPathsPager. +type FileSystemClientListPathsResponse struct { + PathList + // Continuation contains the information returned from the x-ms-continuation header response. + Continuation *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileSystemClientSetPropertiesResponse contains the response from method FileSystemClient.SetProperties. +type FileSystemClientSetPropertiesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientAppendDataResponse contains the response from method PathClient.AppendData. +type PathClientAppendDataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientCreateResponse contains the response from method PathClient.Create. +type PathClientCreateResponse struct { + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // Continuation contains the information returned from the x-ms-continuation header response. + Continuation *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientDeleteResponse contains the response from method PathClient.Delete. +type PathClientDeleteResponse struct { + // Continuation contains the information returned from the x-ms-continuation header response. + Continuation *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DeletionID contains the information returned from the x-ms-deletion-id header response. + DeletionID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientFlushDataResponse contains the response from method PathClient.FlushData. +type PathClientFlushDataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientGetPropertiesResponse contains the response from method PathClient.GetProperties. +type PathClientGetPropertiesResponse struct { + // ACL contains the information returned from the x-ms-acl header response. + ACL *string + + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 *string + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // Group contains the information returned from the x-ms-group header response. + Group *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. 
+ LeaseDuration *string + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *string + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *string + + // Owner contains the information returned from the x-ms-owner header response. + Owner *string + + // Permissions contains the information returned from the x-ms-permissions header response. + Permissions *string + + // Properties contains the information returned from the x-ms-properties header response. + Properties *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // ResourceType contains the information returned from the x-ms-resource-type header response. + ResourceType *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientLeaseResponse contains the response from method PathClient.Lease. +type PathClientLeaseResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientReadResponse contains the response from method PathClient.Read. 
+type PathClientReadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 *string + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. 
+ LeaseDuration *string + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *string + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *string + + // Properties contains the information returned from the x-ms-properties header response. + Properties *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // ResourceType contains the information returned from the x-ms-resource-type header response. + ResourceType *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentMD5 contains the information returned from the x-ms-content-md5 header response. + XMSContentMD5 *string +} + +// PathClientSetAccessControlRecursiveResponse contains the response from method PathClient.SetAccessControlRecursive. +type PathClientSetAccessControlRecursiveResponse struct { + SetAccessControlRecursiveResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Continuation contains the information returned from the x-ms-continuation header response. + Continuation *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientSetAccessControlResponse contains the response from method PathClient.SetAccessControl. +type PathClientSetAccessControlResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientSetExpiryResponse contains the response from method PathClient.SetExpiry. +type PathClientSetExpiryResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PathClientUndeleteResponse contains the response from method PathClient.Undelete. +type PathClientUndeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // ResourceType contains the information returned from the x-ms-resource-type header response. + ResourceType *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// PathClientUpdateResponse contains the response from method PathClient.Update. +type PathClientUpdateResponse struct { + SetAccessControlRecursiveResponse + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 *string + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // Properties contains the information returned from the x-ms-properties header response. + Properties *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string + + // XMSContinuation contains the information returned from the x-ms-continuation header response. + XMSContinuation *string +} + +// ServiceClientListFileSystemsResponse contains the response from method ServiceClient.NewListFileSystemsPager. +type ServiceClientListFileSystemsResponse struct { + FileSystemList + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Continuation contains the information returned from the x-ms-continuation header response. + Continuation *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/sdk/storage/azdatalake/internal/generated/zz_service_client.go b/sdk/storage/azdatalake/internal/generated/zz_service_client.go new file mode 100644 index 000000000000..4c6e81408b18 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated/zz_service_client.go @@ -0,0 +1,91 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" + "time" +) + +// ServiceClient contains the methods for the Service group. +// Don't use this type directly, use a constructor function instead. 
+type ServiceClient struct { + internal *azcore.Client + endpoint string +} + +// NewListFileSystemsPager - List filesystems and their properties in given account. +// +// Generated from API version 2020-10-02 +// - options - ServiceClientListFileSystemsOptions contains the optional parameters for the ServiceClient.NewListFileSystemsPager +// method. +// +// ListFileSystemsCreateRequest creates the ListFileSystems request. +func (client *ServiceClient) ListFileSystemsCreateRequest(ctx context.Context, options *ServiceClientListFileSystemsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("resource", "account") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Continuation != nil { + reqQP.Set("continuation", *options.Continuation) + } + if options != nil && options.MaxResults != nil { + reqQP.Set("maxResults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listFileSystemsHandleResponse handles the ListFileSystems response. 
+func (client *ServiceClient) ListFileSystemsHandleResponse(resp *http.Response) (ServiceClientListFileSystemsResponse, error) { + result := ServiceClientListFileSystemsResponse{} + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientListFileSystemsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-continuation"); val != "" { + result.Continuation = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if err := runtime.UnmarshalAsJSON(resp, &result.FileSystemList); err != nil { + return ServiceClientListFileSystemsResponse{}, err + } + return result, nil +} diff --git a/sdk/storage/azdatalake/internal/generated/zz_time_rfc1123.go b/sdk/storage/azdatalake/internal/generated/zz_time_rfc1123.go new file mode 100644 index 000000000000..4b4d51aa3994 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated/zz_time_rfc1123.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "strings" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` +) + +type timeRFC1123 time.Time + +func (t timeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(rfc1123JSON)) + return b, nil +} + +func (t timeRFC1123) MarshalText() ([]byte, error) { + b := []byte(time.Time(t).Format(time.RFC1123)) + return b, nil +} + +func (t *timeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) + *t = timeRFC1123(p) + return err +} + +func (t *timeRFC1123) UnmarshalText(data []byte) error { + p, err := time.Parse(time.RFC1123, string(data)) + *t = timeRFC1123(p) + return err +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/autorest.md b/sdk/storage/azdatalake/internal/generated_blob/autorest.md new file mode 100644 index 000000000000..5905e9ac68ee --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/autorest.md @@ -0,0 +1,480 @@ +# Code Generation - Azure Blob SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: ../generated_blob +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.49" +``` + +### Undo breaking change with BlobName +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. 
+      replace(/Name\s+\*BlobName/g, `Name *string`);
+```
+
+### Removing UnmarshalXML for BlobItems to create custom UnmarshalXML function
+```yaml
+directive:
+- from: swagger-document
+  where: $.definitions
+  transform: >
+    $.BlobItemInternal["x-ms-go-omit-serde-methods"] = true;
+```
+
+### Remove pager methods and export various generated methods in container client
+
+``` yaml
+directive:
+  - from: zz_container_client.go
+    where: $
+    transform: >-
+      return $.
+      replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `//\n// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`).
+      replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`).
+      replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`);
+```
+
+### Remove pager methods and export various generated methods in service client
+
+``` yaml
+directive:
+  - from: zz_service_client.go
+    where: $
+    transform: >-
+      return $.
+      replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `//\n// listContainersSegmentCreateRequest creates the ListContainersSegment request`).
+      replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`).
+      replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`);
+```
+
+### Fix BlobMetadata.
+
+``` yaml
+directive:
+- from: swagger-document
+  where: $.definitions
+  transform: >
+    delete $.BlobMetadata["properties"];
+
+```
+
+### Don't include container name or blob in path - we have direct URIs.
+ +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{containerName}/{blob}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); + } + else if (property.includes('/{containerName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); + } + } +``` + +### Remove DataLake stuff. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('filesystem')) + { + delete $[property]; + } + } +``` + +### Remove DataLakeStorageError + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.DataLakeStorageError; +``` + +### Fix 304s + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}"] + transform: > + $.get.responses["304"] = { + "description": "The condition specified using HTTP conditional header(s) is not met.", + "x-az-response-name": "ConditionNotMetError", + "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } + }; +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "BlobGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Fix RehydratePriority + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.RehydratePriority["x-ms-enum"]; + $.RehydratePriority["x-ms-enum"] = { + "name": 
"RehydratePriority", + "modelAsString": false + }; +``` + +### Fix BlobDeleteType + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.BlobDeleteType.enum; + $.BlobDeleteType.enum = [ + "None", + "Permanent" + ]; +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; + delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; +``` + +# Export various createRequest/HandleResponse methods + +``` yaml +directive: +- from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/listBlobHierarchySegmentCreateRequest/g, function(_, s) { return `ListBlobHierarchySegmentCreateRequest` }). + replace(/listBlobHierarchySegmentHandleResponse/g, function(_, s) { return `ListBlobHierarchySegmentHandleResponse` }); + +- from: zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/getPageRanges(Diff)?CreateRequest/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}CreateRequest` }). 
+      replace(/getPageRanges(Diff)?HandleResponse/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}HandleResponse` });
+```
+
+### Clean up some const type names so they don't stutter
+
+``` yaml
+directive:
+- from: swagger-document
+  where: $.parameters['BlobDeleteType']
+  transform: >
+    $["x-ms-enum"].name = "DeleteType";
+    $["x-ms-client-name"] = "DeleteType";
+
+- from: swagger-document
+  where: $.parameters['BlobExpiryOptions']
+  transform: >
+    $["x-ms-enum"].name = "ExpiryOptions";
+    $["x-ms-client-name"] = "ExpiryOptions";
+
+- from: swagger-document
+  where: $["x-ms-paths"][*].*.responses[*].headers["x-ms-immutability-policy-mode"]
+  transform: >
+    $["x-ms-client-name"] = "ImmutabilityPolicyMode";
+    $.enum = [ "Mutable", "Unlocked", "Locked"];
+    $["x-ms-enum"] = { "name": "ImmutabilityPolicyMode", "modelAsString": false };
+
+- from: swagger-document
+  where: $.parameters['ImmutabilityPolicyMode']
+  transform: >
+    $["x-ms-enum"].name = "ImmutabilityPolicySetting";
+    $["x-ms-client-name"] = "ImmutabilityPolicySetting";
+
+- from: swagger-document
+  where: $.definitions['BlobPropertiesInternal']
+  transform: >
+    $.properties.ImmutabilityPolicyMode["x-ms-enum"].name = "ImmutabilityPolicyMode";
+```
+
+### use azcore.ETag
+
+``` yaml
+directive:
+- from: zz_models.go
+  where: $
+  transform: >-
+    return $.
+    replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`).
+    replace(/Etag\s+\*string/g, `ETag *azcore.ETag`).
+    replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`).
+    replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`).
+    replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`).
+    replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`);
+
+- from: zz_response_types.go
+  where: $
+  transform: >-
+    return $.
+    replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`).
+ replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). + replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`). + replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`). + replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`); +``` + +### Unsure why this casing changed, but fixing it + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/SignedOid\s+\*string/g, `SignedOID *string`). + replace(/SignedTid\s+\*string/g, `SignedTID *string`); +``` + +### Fixing Typo with StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed + +``` yaml +directive: +- from: zz_constants.go + where: $ + transform: >- + return $. 
+ replace(/IncrementalCopyOfEralierVersionSnapshotNotAllowed/g, "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"); +``` + +### Fix up x-ms-content-crc64 header response name + +``` yaml +directive: +- from: swagger-document + where: $.x-ms-paths.*.*.responses.*.headers.x-ms-content-crc64 + transform: > + $["x-ms-client-name"] = "ContentCRC64" +``` + +``` yaml +directive: +- rename-model: + from: BlobItemInternal + to: BlobItem +- rename-model: + from: BlobPropertiesInternal + to: BlobProperties +``` + +### Updating encoding URL, Golang adds '+' which disrupts encoding with service + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`) +``` + +### Change `where` parameter in blob filtering to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.FilterBlobsWhere + transform: > + $.required = true; +``` + +### Change `Duration` parameter in leases to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.LeaseDuration + transform: > + $.required = true; +``` + +### Change CPK acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cpk/g, "CPK"); +``` + +### Change CORS acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cors/g, "CORS"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Fix Content-Type header in submit batch request + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_service_client.go + where: $ + transform: >- + return $. 
+ replace (/req.SetBody\(body\,\s+\"application\/xml\"\)/g, `req.SetBody(body, multipartContentType)`); +``` + +### Fix response status code check in submit batch request + +``` yaml +directive: +- from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/if\s+!runtime\.HasStatusCode\(resp,\s+http\.StatusOK\)\s+\{\s*\n\t\treturn\s+ServiceClientSubmitBatchResponse\{\}\,\s+runtime\.NewResponseError\(resp\)\s*\n\t\}/g, + `if !runtime.HasStatusCode(resp, http.StatusAccepted) {\n\t\treturn ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)\n\t}`); +``` + +### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_blob_client.go + - zz_appendblob_client.go + - zz_blockblob_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`). 
+ replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`); +``` + +### Change container prefix to filesystem +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/ServiceClientListContainersSegmentResponse/g, 'ServiceClientListFileSystemsSegmentResponse'). + replace(/ListContainersSegmentResponse/g, 'ListFileSystemsSegmentResponse'). + replace(/ContainerItem/g, 'FileSystemItem'). + replace(/PublicAccessTypeBlob/g, 'PublicAccessTypeFile'). + replace(/PublicAccessTypeContainer/g, 'PublicAccessTypeFileSystem'). + replace(/ContainerClientListBlobHierarchySegmentResponse/g, 'FileSystemClientListPathHierarchySegmentResponse'). + replace(/ListBlobsHierarchySegmentResponse/g, 'ListPathsHierarchySegmentResponse'). + replace(/(^|[^"])ContainerName/g, '$1FileSystemName'). + replace(/BlobHierarchyListSegment/g, 'PathHierarchyListSegment'). + replace(/BlobItems/g, 'PathItems'). + replace(/BlobItem/g, 'PathItem'). + replace(/BlobPrefix/g, 'PathPrefix'). + replace(/BlobPrefixes/g, 'PathPrefixes'). + replace(/BlobProperties/g, 'PathProperties'). 
+ replace(/ContainerProperties/g, 'FileSystemProperties'); +``` diff --git a/sdk/storage/azdatalake/internal/generated_blob/blob_client.go b/sdk/storage/azdatalake/internal/generated_blob/blob_client.go new file mode 100644 index 000000000000..b5db4b2b68a5 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/blob_client.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated_blob + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "time" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) + +func (client *BlobClient) Endpoint() string { + return client.endpoint +} + +func (client *BlobClient) InternalClient() *azcore.Client { + return client.internal +} + +func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) +} + +func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) +} + +// NewBlobClient creates a new instance of BlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
+func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient { + client := &BlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/build.go b/sdk/storage/azdatalake/internal/generated_blob/build.go new file mode 100644 index 000000000000..b1a3b508ab34 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated_blob diff --git a/sdk/storage/azdatalake/internal/generated_blob/container_client.go b/sdk/storage/azdatalake/internal/generated_blob/container_client.go new file mode 100644 index 000000000000..441eab0fcbe9 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/container_client.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated_blob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *ContainerClient) Endpoint() string { + return client.endpoint +} + +func (client *ContainerClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewContainerClient creates a new instance of ContainerClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. 
+func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient { + client := &ContainerClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/models.go b/sdk/storage/azdatalake/internal/generated_blob/models.go new file mode 100644 index 000000000000..630cd80fbc45 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/models.go @@ -0,0 +1,65 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated_blob + +type TransactionalContentSetter interface { + SetCRC64([]byte) + SetMD5([]byte) +} + +func (a *AppendBlobClientAppendBlockOptions) SetCRC64(v []byte) { + a.TransactionalContentCRC64 = v +} + +func (a *AppendBlobClientAppendBlockOptions) SetMD5(v []byte) { + a.TransactionalContentMD5 = v +} + +func (b *BlockBlobClientStageBlockOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientStageBlockOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + +func (p *PageBlobClientUploadPagesOptions) SetCRC64(v []byte) { + p.TransactionalContentCRC64 = v +} + +func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) { + p.TransactionalContentMD5 = v +} + +type SourceContentSetter interface { + SetSourceContentCRC64(v []byte) + SetSourceContentMD5(v []byte) +} + +func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentCRC64(v []byte) { + a.SourceContentcrc64 = v +} + +func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentMD5(v []byte) { + a.SourceContentMD5 = v +} + +func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentCRC64(v []byte) { + b.SourceContentcrc64 = v +} + +func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentMD5(v []byte) { + b.SourceContentMD5 = v +} + +func (p 
*PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte) { + p.SourceContentcrc64 = v +} + +func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) { + p.SourceContentMD5 = v +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/service_client.go b/sdk/storage/azdatalake/internal/generated_blob/service_client.go new file mode 100644 index 000000000000..05798c7efdca --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/service_client.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated_blob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient { + client := &ServiceClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_appendblob_client.go b/sdk/storage/azdatalake/internal/generated_blob/zz_appendblob_client.go new file mode 100644 index 000000000000..f58dba665ddf --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/zz_appendblob_client.go @@ -0,0 +1,651 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated_blob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// AppendBlobClient contains the methods for the AppendBlob group. +// Don't use this type directly, use a constructor function instead. +type AppendBlobClient struct { + internal *azcore.Client + endpoint string +} + +// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append +// Block operation is permitted only if the blob was created with x-ms-blob-type set to +// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - contentLength - The length of the request. +// - body - Initial data +// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockHandleResponse(resp) +} + +// appendBlockCreateRequest creates the AppendBlock request. 
+func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && 
cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// appendBlockHandleResponse handles the AppendBlock response. 
+func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { + result := AppendBlobClientAppendBlockResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := 
strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where +// the contents are read from a source url. The Append Block operation is permitted only if the blob was +// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - sourceURL - Specify a URL to the copy source. +// - contentLength - The length of the request. +// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockFromURLHandleResponse(resp) +} + +// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
+func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + 
req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] 
= []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
+func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { + result := AppendBlobClientAppendBlockFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val 
!= "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Create - The Create Append Blob operation creates a new append blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - contentLength - The length of the request. +// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + 
req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { + result := AppendBlobClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version +// or later. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2020-10-02 +// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + } + return client.sealHandleResponse(resp) +} + +// sealCreateRequest creates the Seal request. 
+func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "seal") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = 
[]string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// sealHandleResponse handles the Seal response. +func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { + result := AppendBlobClientSealResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.IsSealed = &isSealed + } + return result, nil +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_blob_client.go b/sdk/storage/azdatalake/internal/generated_blob/zz_blob_client.go new file mode 100644 index 000000000000..73bfa9187875 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/zz_blob_client.go @@ -0,0 +1,2874 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated_blob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/http" + "strconv" + "strings" + "time" +) + +// BlobClient contains the methods for the Blob group. +// Don't use this type directly, use a constructor function instead. +type BlobClient struct { + internal *azcore.Client + endpoint string +} + +// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination +// blob with zero length and full metadata. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. +// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.abortCopyFromURLHandleResponse(resp) +} + +// abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. +func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "copy") + reqQP.Set("copyid", copyID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-copy-action"] = []string{"abort"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. 
+func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (BlobClientAbortCopyFromURLResponse, error) { + result := BlobClientAbortCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = 
[]string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. +func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { + result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + 
+// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. 
+func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// 
breakLeaseHandleResponse handles the BreakLease response. +func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { + result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. 
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + 
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. +func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { + result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = 
&date + } + return result, nil +} + +// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response +// until the copy is complete. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) { + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.copyFromURLHandleResponse(resp) +} + +// copyFromURLCreateRequest creates the CopyFromURL request. +func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-requires-sync"] = []string{"true"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + 
req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = 
[]string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// copyFromURLHandleResponse handles the CopyFromURL response. 
+func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { + result := BlobClientCopyFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + return result, nil +} + +// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2020-10-02 +// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + } + return client.createSnapshotHandleResponse(resp) +} + +// createSnapshotCreateRequest creates the CreateSnapshot request. 
+func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "snapshot") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = 
[]string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createSnapshotHandleResponse handles the CreateSnapshot response. +func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { + result := BlobClientCreateSnapshotResponse{} + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, 
err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed +// from the storage account. If the storage account's soft delete feature is enabled, +// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service +// retains the blob or snapshot for the number of days specified by the +// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number +// of days has passed, the blob's data is permanently removed from the storage +// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use +// the List Blobs API and specify the "include=deleted" query parameter to discover +// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. +// All other operations on a soft-deleted blob or snapshot causes the service to +// return an HTTP status code of 404 (ResourceNotFound). +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientDeleteResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return BlobClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.DeleteType != nil { + reqQP.Set("deletetype", string(*options.DeleteType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + 
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
+func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientDeleteResponse, error) { + result := BlobClientDeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { + req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.deleteImmutabilityPolicyHandleResponse(resp) +} + +// deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. 
+func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response. +func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientDeleteImmutabilityPolicyResponse, error) { + result := BlobClientDeleteImmutabilityPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You +// can also call Download to read a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. 
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientDownloadResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientDownloadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + return BlobClientDownloadResponse{}, runtime.NewResponseError(resp) + } + return client.downloadHandleResponse(resp) +} + +// downloadCreateRequest creates the Download request. 
+func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} + } + if options != nil && options.RangeGetContentCRC64 != nil { + req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = 
[]string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// downloadHandleResponse handles the Download response. 
+func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { + result := BlobClientDownloadResponse{Body: resp.Body} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != 
"" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := 
resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + 
lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LegalHold = &legalHold + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. 
+func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, options *BlobClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
+func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { + result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + return result, nil +} + +// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties +// for the blob. It does not return the content of the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = 
[]string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { + result := BlobClientGetPropertiesResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.CreationTime = &creationTime + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + 
result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsIncrementalCopy = &isIncrementalCopy + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil 
{ + return BlobClientGetPropertiesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.AccessTierInferred = &accessTierInferred + } + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := 
time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.AccessTierChangeTime = &accessTierChangeTime + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ExpiresOn = &expiresOn + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + 
		if err != nil {
			return BlobClientGetPropertiesResponse{}, err
		}
		result.LegalHold = &legalHold
	}
	return result, nil
}

// GetTags - The Get Tags operation enables users to get the tags associated with a blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) {
	// Build the request, send it through the pipeline, then parse the response.
	// Any non-200 status is surfaced as an *azcore.ResponseError.
	req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions)
	if err != nil {
		return BlobClientGetTagsResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientGetTagsResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp)
	}
	return client.getTagsHandleResponse(resp)
}

// getTagsCreateRequest creates the GetTags request.
func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	// "comp=tags" selects the tags sub-resource; snapshot/versionid scope the
	// request to a specific snapshot or version when supplied.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "tags")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	if options != nil && options.Snapshot != nil {
		reqQP.Set("snapshot", *options.Snapshot)
	}
	if options != nil && options.VersionID != nil {
		reqQP.Set("versionid", *options.VersionID)
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Optional access-condition headers are only emitted when the caller set them.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// getTagsHandleResponse handles the GetTags response.
func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClientGetTagsResponse, error) {
	result := BlobClientGetTagsResponse{}
	// Absent headers leave the corresponding fields nil.
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientGetTagsResponse{}, err
		}
		result.Date = &date
	}
	// The tag set itself is returned as XML in the response body.
	if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil {
		return BlobClientGetTagsResponse{}, err
	}
	return result, nil
}

// Query - The Query operation enables users to select/project on blob data by providing simple query expressions.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) {
	req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
	if err != nil {
		return BlobClientQueryResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientQueryResponse{}, err
	}
	// Query may return 206 Partial Content as well as 200 OK.
	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) {
		return BlobClientQueryResponse{}, runtime.NewResponseError(resp)
	}
	return client.queryHandleResponse(resp)
}

// queryCreateRequest creates the Query request.
func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "query")
	if options != nil && options.Snapshot != nil {
		reqQP.Set("snapshot", *options.Snapshot)
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// The query result is streamed back to the caller, so do not buffer the body.
	runtime.SkipBodyDownload(req)
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Customer-provided key (CPK) headers for server-side encryption.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	// Conditional-request headers; timestamps are sent as RFC1123 in GMT.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// The query expression, when provided, is sent as the XML request body.
	if options != nil && options.QueryRequest != nil {
		if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil {
			return nil, err
		}
		return req, nil
	}
	return req, nil
}

// queryHandleResponse handles the Query response.
+func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { + result := BlobClientQueryResponse{Body: resp.Body} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := 
resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientQueryResponse{}, err + } + 
result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + return result, nil +} + +// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) {
	req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions)
	if err != nil {
		return BlobClientReleaseLeaseResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientReleaseLeaseResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp)
	}
	return client.releaseLeaseHandleResponse(resp)
}

// releaseLeaseCreateRequest creates the ReleaseLease request.
func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "lease")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// The lease action and current lease ID are mandatory for a release.
	req.Raw().Header["x-ms-lease-action"] = []string{"release"}
	req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// releaseLeaseHandleResponse handles the ReleaseLease response.
func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) {
	result := BlobClientReleaseLeaseResponse{}
	// Absent headers leave the corresponding fields nil.
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientReleaseLeaseResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientReleaseLeaseResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - leaseID - Specifies the current lease ID on the resource.
// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) {
	// Standard generated flow: build request, run the pipeline, map non-200 to error.
	req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions)
	if err != nil {
		return BlobClientRenewLeaseResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientRenewLeaseResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp)
	}
	return client.renewLeaseHandleResponse(resp)
}

// renewLeaseCreateRequest creates the RenewLease request.
func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "lease")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// The lease action and current lease ID are mandatory for a renew.
	req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
	req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// renewLeaseHandleResponse handles the RenewLease response.
func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) {
	result := BlobClientRenewLeaseResponse{}
	// Absent headers leave the corresponding fields nil.
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientRenewLeaseResponse{}, err
		}
		result.LastModified = &lastModified
	}
	// The renewed lease ID is echoed back by the service.
	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
		result.LeaseID = &val
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientRenewLeaseResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// SetExpiry - Sets the time a blob will expire and be deleted.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - expiryOptions - Required. Indicates mode of the expiry time
// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) {
	req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options)
	if err != nil {
		return BlobClientSetExpiryResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientSetExpiryResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp)
	}
	return client.setExpiryHandleResponse(resp)
}

// setExpiryCreateRequest creates the SetExpiry request.
func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "expiry")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-expiry-option"] = []string{string(expiryOptions)}
	// ExpiresOn is already a pre-formatted string here; it is sent verbatim.
	if options != nil && options.ExpiresOn != nil {
		req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setExpiryHandleResponse handles the SetExpiry response.
func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) {
	result := BlobClientSetExpiryResponse{}
	// Absent headers leave the corresponding fields nil.
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetExpiryResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetExpiryResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method.
// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) {
	req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions)
	if err != nil {
		return BlobClientSetHTTPHeadersResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientSetHTTPHeadersResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp)
	}
	return client.setHTTPHeadersHandleResponse(resp)
}

// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request.
func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Each standard HTTP property of the blob is set via its x-ms-blob-* header;
	// only headers the caller supplied are emitted.
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		// The MD5 digest is transmitted base64-encoded, per the REST contract.
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response.
func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) {
	result := BlobClientSetHTTPHeadersResponse{}
	// Absent headers leave the corresponding fields nil.
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetHTTPHeadersResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return BlobClientSetHTTPHeadersResponse{}, err
		}
		result.BlobSequenceNumber = &blobSequenceNumber
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetHTTPHeadersResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy
// method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) {
	req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions)
	if err != nil {
		return BlobClientSetImmutabilityPolicyResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientSetImmutabilityPolicyResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp)
	}
	return client.setImmutabilityPolicyHandleResponse(resp)
}

// setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request.
func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "immutabilityPolicies")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	// Policy expiry timestamps are sent as RFC1123 in GMT, like the conditional headers.
	if options != nil && options.ImmutabilityPolicyExpiry != nil {
		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
	}
	if options != nil && options.ImmutabilityPolicyMode != nil {
		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response.
func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientSetImmutabilityPolicyResponse, error) {
	result := BlobClientSetImmutabilityPolicyResponse{}
	// Absent headers leave the corresponding fields nil.
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetImmutabilityPolicyResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
		immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetImmutabilityPolicyResponse{}, err
		}
		result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry
	}
	if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
		result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val)
	}
	return result, nil
}

// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - legalHold - Specified if a legal hold should be set on the blob.
// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) {
	req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options)
	if err != nil {
		return BlobClientSetLegalHoldResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientSetLegalHoldResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp)
	}
	return client.setLegalHoldHandleResponse(resp)
}

// setLegalHoldCreateRequest creates the SetLegalHold request.
func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "legalhold")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	// The legal-hold flag is required, so it is always sent ("true"/"false").
	req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setLegalHoldHandleResponse handles the SetLegalHold response.
func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobClientSetLegalHoldResponse, error) {
	result := BlobClientSetLegalHoldResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetLegalHoldResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
		legalHold, err := strconv.ParseBool(val)
		if err != nil {
			return BlobClientSetLegalHoldResponse{}, err
		}
		result.LegalHold = &legalHold
	}
	return result, nil
}

// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value
// pairs
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) {
	req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	if err != nil {
		return BlobClientSetMetadataResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientSetMetadataResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp)
	}
	return client.setMetadataHandleResponse(resp)
}

// setMetadataCreateRequest creates the SetMetadata request.
func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "metadata")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// User metadata is sent as one "x-ms-meta-<key>" header per pair; nil values are skipped.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setMetadataHandleResponse handles the SetMetadata response.
func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) {
	result := BlobClientSetMetadataResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetMetadataResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetMetadataResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlobClientSetMetadataResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}

// SetTags - The Set Tags operation enables users to set tags on a blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - tags - Blob tags
// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) {
	req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions)
	if err != nil {
		return BlobClientSetTagsResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientSetTagsResponse{}, err
	}
	// Set Blob Tags succeeds with 204 No Content (unlike most blob operations, which return 200).
	if !runtime.HasStatusCode(resp, http.StatusNoContent) {
		return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp)
	}
	return client.setTagsHandleResponse(resp)
}

// setTagsCreateRequest creates the SetTags request.
func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "tags")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	if options != nil && options.VersionID != nil {
		reqQP.Set("versionid", *options.VersionID)
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	if options != nil && options.TransactionalContentCRC64 != nil {
		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
	}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// The tag set is sent as an XML document in the request body.
	if err := runtime.MarshalAsXML(req, tags); err != nil {
		return nil, err
	}
	return req, nil
}

// setTagsHandleResponse handles the SetTags response.
func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClientSetTagsResponse, error) {
	result := BlobClientSetTagsResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientSetTagsResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage
// account and on a block blob in a blob storage account (locally redundant storage only). A
// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive
// storage type. This operation does not update the blob's ETag.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - tier - Indicates the tier to be set on the blob.
// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) {
	req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions)
	if err != nil {
		return BlobClientSetTierResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientSetTierResponse{}, err
	}
	// 200 means the tier change took effect immediately; 202 means it is pending
	// (e.g. rehydration from Archive), so both are accepted as success.
	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
		return BlobClientSetTierResponse{}, runtime.NewResponseError(resp)
	}
	return client.setTierHandleResponse(resp)
}

// setTierCreateRequest creates the SetTier request.
func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "tier")
	if options != nil && options.Snapshot != nil {
		reqQP.Set("snapshot", *options.Snapshot)
	}
	if options != nil && options.VersionID != nil {
		reqQP.Set("versionid", *options.VersionID)
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-access-tier"] = []string{string(tier)}
	if options != nil && options.RehydratePriority != nil {
		req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setTierHandleResponse handles the SetTier response.
func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClientSetTierResponse, error) {
	result := BlobClientSetTierResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	return result, nil
}

// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
// - options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method.
// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
// method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) {
	req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
	if err != nil {
		return BlobClientStartCopyFromURLResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientStartCopyFromURLResponse{}, err
	}
	// Copy is asynchronous on the service side, so success is 202 Accepted;
	// progress is tracked via the returned copy ID/status.
	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
		return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp)
	}
	return client.startCopyFromURLHandleResponse(resp)
}

// startCopyFromURLCreateRequest creates the StartCopyFromURL request.
func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Note: no "comp" query parameter — the copy operation is selected by the
	// x-ms-copy-source header below.
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	if options != nil && options.Tier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
	}
	if options != nil && options.RehydratePriority != nil {
		req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
		req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
		req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	if options != nil && options.SealBlob != nil {
		req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)}
	}
	if options != nil && options.ImmutabilityPolicyExpiry != nil {
		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
	}
	if options != nil && options.ImmutabilityPolicyMode != nil {
		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
	}
	if options != nil && options.LegalHold != nil {
		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// startCopyFromURLHandleResponse handles the StartCopyFromURL response.
func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) {
	result := BlobClientStartCopyFromURLResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientStartCopyFromURLResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientStartCopyFromURLResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
		result.CopyID = &val
	}
	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
		result.CopyStatus = (*CopyStatusType)(&val)
	}
	return result, nil
}

// Undelete - Undelete a blob that was previously soft deleted
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) {
	req, err := client.undeleteCreateRequest(ctx, options)
	if err != nil {
		return BlobClientUndeleteResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlobClientUndeleteResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp)
	}
	return client.undeleteHandleResponse(resp)
}

// undeleteCreateRequest creates the Undelete request.
func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *BlobClientUndeleteOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "undelete")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// undeleteHandleResponse handles the Undelete response.
func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClientUndeleteResponse, error) {
	result := BlobClientUndeleteResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlobClientUndeleteResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}
diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_blockblob_client.go b/sdk/storage/azdatalake/internal/generated_blob/zz_blockblob_client.go
new file mode 100644
index 000000000000..09b4362dbb6d
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated_blob/zz_blockblob_client.go
@@ -0,0 +1,970 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated_blob

import (
	"context"
	"encoding/base64"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"io"
	"net/http"
	"strconv"
	"time"
)

// BlockBlobClient contains the methods for the BlockBlob group.
// Don't use this type directly, use a constructor function instead.
type BlockBlobClient struct {
	internal *azcore.Client
	endpoint string
}

// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written to the
// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that
// have changed, then committing the new and existing blocks together. You can do
// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit
// the most recently uploaded version of the block, whichever list it may
// belong to.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - blocks - Blob Blocks.
// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList
// method.
// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) {
	req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	if err != nil {
		return BlockBlobClientCommitBlockListResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlockBlobClientCommitBlockListResponse{}, err
	}
	// Committing a block list creates (or replaces) the blob, so success is 201 Created.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp)
	}
	return client.commitBlockListHandleResponse(resp)
}

// commitBlockListCreateRequest creates the CommitBlockList request.
func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// comp=blocklist routes this PUT to the Put Block List operation.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "blocklist")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Standard blob content headers copied from BlobHTTPHeaders; each is only
	// emitted when the caller supplied a value.
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	// Binary checksums are transmitted base64-encoded.
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	// Transactional checksums cover the request body (the block-list XML).
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	if options != nil && options.TransactionalContentCRC64 != nil {
		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
	}
	// Metadata keys are sent with the x-ms-meta- prefix; nil values are skipped.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	// Customer-provided key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if options != nil && options.Tier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
	}
	// Conditional request headers; timestamps go out as RFC1123 in GMT.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	// Immutability-policy / legal-hold headers.
	if options != nil && options.ImmutabilityPolicyExpiry != nil {
		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
	}
	if options != nil && options.ImmutabilityPolicyMode != nil {
		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
	}
	if options != nil && options.LegalHold != nil {
		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// The block list itself is serialized as the XML request body.
	if err := runtime.MarshalAsXML(req, blocks); err != nil {
		return nil, err
	}
	return req, nil
}

// commitBlockListHandleResponse handles the CommitBlockList response.
func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) {
	result := BlockBlobClientCommitBlockListResponse{}
	// Each response header is copied into the typed result only when present;
	// malformed date/base64/bool values abort with an error.
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.LastModified = &lastModified
	}
	// Checksums arrive base64-encoded and are decoded to raw bytes.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		contentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.ContentCRC64 = contentCRC64
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}

// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.
//   - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
//   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
//   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) {
	req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions)
	if err != nil {
		return BlockBlobClientGetBlockListResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlockBlobClientGetBlockListResponse{}, err
	}
	// GetBlockList succeeds with 200 OK (a read, unlike the 201 Created writes).
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp)
	}
	return client.getBlockListHandleResponse(resp)
}

// getBlockListCreateRequest creates the GetBlockList request.
+func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + reqQP.Set("blocklisttype", string(listType)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getBlockListHandleResponse handles the GetBlockList response. 
func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) {
	result := BlockBlobClientGetBlockListResponse{}
	// Copy typed values out of the response headers; parse failures abort.
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientGetBlockListResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Content-Type"); val != "" {
		result.ContentType = &val
	}
	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
		blobContentLength, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return BlockBlobClientGetBlockListResponse{}, err
		}
		result.BlobContentLength = &blobContentLength
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientGetBlockListResponse{}, err
		}
		result.Date = &date
	}
	// Unlike the write operations, this response carries an XML body: the block list.
	if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil {
		return BlockBlobClientGetBlockListResponse{}, err
	}
	return result, nil
}

// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from
// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not
// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform
// partial updates to a block blob’s contents using a source URL, use the Put
// Block from URL API in conjunction with Put Block List.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - contentLength - The length of the request.
//   - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
//     a page blob snapshot. The value should be URL-encoded as it would appear in a request
//     URI. The source blob must either be public or must be authenticated via a shared access signature.
//   - options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL
//     method.
//   - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
//   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
//   - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
//   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
//   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
//   - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
//     method.
+func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.putBlobFromURLHandleResponse(resp) +} + +// putBlobFromURLCreateRequest creates the PutBlobFromURL request. 
func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// x-ms-blob-type selects the blob flavor being created.
	req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// Standard blob content headers, each emitted only when supplied.
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	// Metadata keys get the x-ms-meta- prefix; nil values are skipped.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	// Customer-provided key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if options != nil && options.Tier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
	}
	// Conditional headers for the DESTINATION blob; RFC1123 timestamps in GMT.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Conditional headers evaluated against the SOURCE blob at copySource.
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
		req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
		req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.SourceContentMD5 != nil {
		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	// No request body: the service pulls the content itself from x-ms-copy-source.
	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
	if options != nil && options.CopySourcePathProperties != nil {
		req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourcePathProperties)}
	}
	if options != nil && options.CopySourceAuthorization != nil {
		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// putBlobFromURLHandleResponse handles the PutBlobFromURL response.
func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) {
	result := BlockBlobClientPutBlobFromURLResponse{}
	// Copy typed values out of the response headers; parse failures abort.
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientPutBlobFromURLResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientPutBlobFromURLResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientPutBlobFromURLResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientPutBlobFromURLResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}

// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
//     to 64 bytes in size. For a given blob, the length of the value specified for the blockid
//     parameter must be the same size for each block.
//   - contentLength - The length of the request.
//   - body - Initial data
//   - options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method.
//   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
//   - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
//   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) {
	req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo)
	if err != nil {
		return BlockBlobClientStageBlockResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlockBlobClientStageBlockResponse{}, err
	}
	// 201 Created is the only success status for staging a block.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp)
	}
	return client.stageBlockHandleResponse(resp)
}

// stageBlockCreateRequest creates the StageBlock request.
func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// comp=block plus the blockid route this PUT to the Put Block operation.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "block")
	reqQP.Set("blockid", blockID)
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// Transactional checksums cover the uploaded block body; sent base64-encoded.
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	if options != nil && options.TransactionalContentCRC64 != nil {
		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Customer-provided key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// The raw block bytes are the request body.
	if err := req.SetBody(body, "application/octet-stream"); err != nil {
		return nil, err
	}
	return req, nil
}

// stageBlockHandleResponse handles the StageBlock response.
func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) {
	result := BlockBlobClientStageBlockResponse{}
	// Copy typed values out of the response headers; parse failures abort.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientStageBlockResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientStageBlockResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		contentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientStageBlockResponse{}, err
		}
		result.ContentCRC64 = contentCRC64
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientStageBlockResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}

// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents
// are read from a URL.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
//     to 64 bytes in size. For a given blob, the length of the value specified for the blockid
//     parameter must be the same size for each block.
//   - contentLength - The length of the request.
//   - sourceURL - Specify a URL to the copy source.
//   - options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL
//     method.
//   - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
//   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
//   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
//   - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
//     method.
func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) {
	req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
	if err != nil {
		return BlockBlobClientStageBlockFromURLResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlockBlobClientStageBlockFromURLResponse{}, err
	}
	// 201 Created is the only success status for staging a block from a URL.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp)
	}
	return client.stageBlockFromURLHandleResponse(resp)
}

// stageBlockFromURLCreateRequest creates the StageBlockFromURL request.
func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// comp=block plus the blockid route this PUT to the Put Block From URL operation.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "block")
	reqQP.Set("blockid", blockID)
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// No request body: the service reads the block content from sourceURL.
	req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
	if options != nil && options.SourceRange != nil {
		req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
	}
	// Source checksums let the service validate what it fetched; sent base64-encoded.
	if options != nil && options.SourceContentMD5 != nil {
		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
	}
	if options != nil && options.SourceContentcrc64 != nil {
		req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
	}
	// Customer-provided key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Conditional headers evaluated against the SOURCE blob; RFC1123 timestamps in GMT.
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
		req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.CopySourceAuthorization != nil {
		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// stageBlockFromURLHandleResponse handles the StageBlockFromURL response.
// stageBlockFromURLHandleResponse handles the StageBlockFromURL response,
// copying response headers into the strongly-typed result. A header whose
// value fails to parse (base64, RFC1123 time, bool) aborts with that error.
func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) {
	result := BlockBlobClientStageBlockFromURLResponse{}
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientStageBlockFromURLResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		contentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientStageBlockFromURLResponse{}, err
		}
		result.ContentCRC64 = contentCRC64
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientStageBlockFromURLResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientStageBlockFromURLResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}

// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob
// overwrites any existing metadata on the blob. Partial updates are not supported with Put
// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of
// the content of a block blob, use the Put Block List operation.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - contentLength - The length of the request.
//   - body - Initial data
//   - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
//   - blobHTTPHeaders - standard blob HTTP headers (content type, encoding, language, MD5, cache control, disposition) to set on the blob.
//   - leaseAccessConditions - lease ID that must match the blob's active lease, if any.
//   - cpkInfo - customer-provided encryption key information.
//   - cpkScopeInfo - encryption scope to apply to the blob.
//   - modifiedAccessConditions - conditional-request headers (If-Match / If-None-Match / If-(Un)Modified-Since / tag conditions).
func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) {
	req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	if err != nil {
		return BlockBlobClientUploadResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return BlockBlobClientUploadResponse{}, err
	}
	// Put Blob returns 201 Created on success; anything else is a service error.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp)
	}
	return client.uploadHandleResponse(resp)
}

// uploadCreateRequest creates the Upload request.
// uploadCreateRequest creates the Upload (Put Blob) request: a PUT of the full
// blob contents with x-ms-blob-type=BlockBlob. Optional headers are emitted
// only when the caller supplied the corresponding value; the body is attached
// last as application/octet-stream.
func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// Standard blob HTTP headers to persist on the blob.
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	// User metadata: one x-ms-meta-<key> header per non-nil entry.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	// Customer-provided key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if options != nil && options.Tier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
	}
	// Conditional-request headers; times go out as RFC1123 in GMT.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	// Immutability policy / legal hold settings.
	if options != nil && options.ImmutabilityPolicyExpiry != nil {
		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
	}
	if options != nil && options.ImmutabilityPolicyMode != nil {
		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
	}
	if options != nil && options.LegalHold != nil {
		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	if err := req.SetBody(body, "application/octet-stream"); err != nil {
		return nil, err
	}
	return req, nil
}

// uploadHandleResponse handles the Upload response.
+func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { + result := BlockBlobClientUploadResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_constants.go b/sdk/storage/azdatalake/internal/generated_blob/zz_constants.go new file mode 100644 index 000000000000..aba25231b855 --- /dev/null +++ 
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated_blob

// AccessTier - a blob access tier: the standard Hot/Cool/Archive tiers or a premium P* tier.
type AccessTier string

const (
	AccessTierArchive AccessTier = "Archive"
	AccessTierCool    AccessTier = "Cool"
	AccessTierHot     AccessTier = "Hot"
	AccessTierP10     AccessTier = "P10"
	AccessTierP15     AccessTier = "P15"
	AccessTierP20     AccessTier = "P20"
	AccessTierP30     AccessTier = "P30"
	AccessTierP4      AccessTier = "P4"
	AccessTierP40     AccessTier = "P40"
	AccessTierP50     AccessTier = "P50"
	AccessTierP6      AccessTier = "P6"
	AccessTierP60     AccessTier = "P60"
	AccessTierP70     AccessTier = "P70"
	AccessTierP80     AccessTier = "P80"
	AccessTierPremium AccessTier = "Premium"
)

// PossibleAccessTierValues returns the possible values for the AccessTier const type.
func PossibleAccessTierValues() []AccessTier {
	return []AccessTier{
		AccessTierArchive,
		AccessTierCool,
		AccessTierHot,
		AccessTierP10,
		AccessTierP15,
		AccessTierP20,
		AccessTierP30,
		AccessTierP4,
		AccessTierP40,
		AccessTierP50,
		AccessTierP6,
		AccessTierP60,
		AccessTierP70,
		AccessTierP80,
		AccessTierPremium,
	}
}

// AccountKind - the kind of storage account backing the endpoint.
type AccountKind string

const (
	AccountKindBlobStorage      AccountKind = "BlobStorage"
	AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage"
	AccountKindFileStorage      AccountKind = "FileStorage"
	AccountKindStorage          AccountKind = "Storage"
	AccountKindStorageV2        AccountKind = "StorageV2"
)

// PossibleAccountKindValues returns the possible values for the AccountKind const type.
func PossibleAccountKindValues() []AccountKind {
	return []AccountKind{
		AccountKindBlobStorage,
		AccountKindBlockBlobStorage,
		AccountKindFileStorage,
		AccountKindStorage,
		AccountKindStorageV2,
	}
}

// ArchiveStatus - rehydration status reported for an archived blob.
type ArchiveStatus string

const (
	ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool"
	ArchiveStatusRehydratePendingToHot  ArchiveStatus = "rehydrate-pending-to-hot"
)

// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type.
func PossibleArchiveStatusValues() []ArchiveStatus {
	return []ArchiveStatus{
		ArchiveStatusRehydratePendingToCool,
		ArchiveStatusRehydratePendingToHot,
	}
}

// BlobGeoReplicationStatus - The status of the secondary location
type BlobGeoReplicationStatus string

const (
	BlobGeoReplicationStatusBootstrap   BlobGeoReplicationStatus = "bootstrap"
	BlobGeoReplicationStatusLive        BlobGeoReplicationStatus = "live"
	BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable"
)

// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type.
func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus {
	return []BlobGeoReplicationStatus{
		BlobGeoReplicationStatusBootstrap,
		BlobGeoReplicationStatusLive,
		BlobGeoReplicationStatusUnavailable,
	}
}

// BlobType - the type of a blob (append, block, or page).
type BlobType string

const (
	BlobTypeAppendBlob BlobType = "AppendBlob"
	BlobTypeBlockBlob  BlobType = "BlockBlob"
	BlobTypePageBlob   BlobType = "PageBlob"
)

// PossibleBlobTypeValues returns the possible values for the BlobType const type.
func PossibleBlobTypeValues() []BlobType {
	return []BlobType{
		BlobTypeAppendBlob,
		BlobTypeBlockBlob,
		BlobTypePageBlob,
	}
}

// BlockListType - which block lists to return from Get Block List.
type BlockListType string

const (
	BlockListTypeAll         BlockListType = "all"
	BlockListTypeCommitted   BlockListType = "committed"
	BlockListTypeUncommitted BlockListType = "uncommitted"
)

// PossibleBlockListTypeValues returns the possible values for the BlockListType const type.
func PossibleBlockListTypeValues() []BlockListType {
	return []BlockListType{
		BlockListTypeAll,
		BlockListTypeCommitted,
		BlockListTypeUncommitted,
	}
}

// CopyStatusType - the state of a copy operation.
type CopyStatusType string

const (
	CopyStatusTypeAborted CopyStatusType = "aborted"
	CopyStatusTypeFailed  CopyStatusType = "failed"
	CopyStatusTypePending CopyStatusType = "pending"
	CopyStatusTypeSuccess CopyStatusType = "success"
)

// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type.
func PossibleCopyStatusTypeValues() []CopyStatusType {
	return []CopyStatusType{
		CopyStatusTypeAborted,
		CopyStatusTypeFailed,
		CopyStatusTypePending,
		CopyStatusTypeSuccess,
	}
}

// DeleteSnapshotsOptionType - whether a delete also removes snapshots ("include") or targets only snapshots ("only").
type DeleteSnapshotsOptionType string

const (
	DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include"
	DeleteSnapshotsOptionTypeOnly    DeleteSnapshotsOptionType = "only"
)

// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type.
func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
	return []DeleteSnapshotsOptionType{
		DeleteSnapshotsOptionTypeInclude,
		DeleteSnapshotsOptionTypeOnly,
	}
}

// DeleteType - whether a delete is a soft delete ("None") or a permanent delete.
type DeleteType string

const (
	DeleteTypeNone      DeleteType = "None"
	DeleteTypePermanent DeleteType = "Permanent"
)

// PossibleDeleteTypeValues returns the possible values for the DeleteType const type.
func PossibleDeleteTypeValues() []DeleteType {
	return []DeleteType{
		DeleteTypeNone,
		DeleteTypePermanent,
	}
}

// EncryptionAlgorithmType - the algorithm used with a customer-provided encryption key.
type EncryptionAlgorithmType string

const (
	EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256"
	EncryptionAlgorithmTypeNone   EncryptionAlgorithmType = "None"
)

// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type.
func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType {
	return []EncryptionAlgorithmType{
		EncryptionAlgorithmTypeAES256,
		EncryptionAlgorithmTypeNone,
	}
}

// ExpiryOptions - how a blob expiry time is interpreted (absolute, relative, or never).
type ExpiryOptions string

const (
	ExpiryOptionsAbsolute           ExpiryOptions = "Absolute"
	ExpiryOptionsNeverExpire        ExpiryOptions = "NeverExpire"
	ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation"
	ExpiryOptionsRelativeToNow      ExpiryOptions = "RelativeToNow"
)

// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type.
func PossibleExpiryOptionsValues() []ExpiryOptions {
	return []ExpiryOptions{
		ExpiryOptionsAbsolute,
		ExpiryOptionsNeverExpire,
		ExpiryOptionsRelativeToCreation,
		ExpiryOptionsRelativeToNow,
	}
}

// ImmutabilityPolicyMode - the mode of an immutability policy on a blob.
type ImmutabilityPolicyMode string

const (
	ImmutabilityPolicyModeLocked   ImmutabilityPolicyMode = "Locked"
	ImmutabilityPolicyModeMutable  ImmutabilityPolicyMode = "Mutable"
	ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked"
)

// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type.
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode {
	return []ImmutabilityPolicyMode{
		ImmutabilityPolicyModeLocked,
		ImmutabilityPolicyModeMutable,
		ImmutabilityPolicyModeUnlocked,
	}
}

// ImmutabilityPolicySetting - immutability policy setting when writing a policy (Locked or Unlocked).
type ImmutabilityPolicySetting string

const (
	ImmutabilityPolicySettingLocked   ImmutabilityPolicySetting = "Locked"
	ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked"
)

// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type.
func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting {
	return []ImmutabilityPolicySetting{
		ImmutabilityPolicySettingLocked,
		ImmutabilityPolicySettingUnlocked,
	}
}

// LeaseDurationType - whether a lease has a fixed or infinite duration.
type LeaseDurationType string

const (
	LeaseDurationTypeFixed    LeaseDurationType = "fixed"
	LeaseDurationTypeInfinite LeaseDurationType = "infinite"
)

// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type.
func PossibleLeaseDurationTypeValues() []LeaseDurationType {
	return []LeaseDurationType{
		LeaseDurationTypeFixed,
		LeaseDurationTypeInfinite,
	}
}

// LeaseStateType - the lease state of a blob or container.
type LeaseStateType string

const (
	LeaseStateTypeAvailable LeaseStateType = "available"
	LeaseStateTypeBreaking  LeaseStateType = "breaking"
	LeaseStateTypeBroken    LeaseStateType = "broken"
	LeaseStateTypeExpired   LeaseStateType = "expired"
	LeaseStateTypeLeased    LeaseStateType = "leased"
)

// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type.
func PossibleLeaseStateTypeValues() []LeaseStateType {
	return []LeaseStateType{
		LeaseStateTypeAvailable,
		LeaseStateTypeBreaking,
		LeaseStateTypeBroken,
		LeaseStateTypeExpired,
		LeaseStateTypeLeased,
	}
}

// LeaseStatusType - whether a lease is currently held (locked) or not (unlocked).
type LeaseStatusType string

const (
	LeaseStatusTypeLocked   LeaseStatusType = "locked"
	LeaseStatusTypeUnlocked LeaseStatusType = "unlocked"
)

// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type.
func PossibleLeaseStatusTypeValues() []LeaseStatusType {
	return []LeaseStatusType{
		LeaseStatusTypeLocked,
		LeaseStatusTypeUnlocked,
	}
}

// ListBlobsIncludeItem - extra datasets to include in a List Blobs response.
type ListBlobsIncludeItem string

const (
	ListBlobsIncludeItemCopy                ListBlobsIncludeItem = "copy"
	ListBlobsIncludeItemDeleted             ListBlobsIncludeItem = "deleted"
	ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions"
	ListBlobsIncludeItemImmutabilitypolicy  ListBlobsIncludeItem = "immutabilitypolicy"
	ListBlobsIncludeItemLegalhold           ListBlobsIncludeItem = "legalhold"
	ListBlobsIncludeItemMetadata            ListBlobsIncludeItem = "metadata"
	ListBlobsIncludeItemSnapshots           ListBlobsIncludeItem = "snapshots"
	ListBlobsIncludeItemTags                ListBlobsIncludeItem = "tags"
	ListBlobsIncludeItemUncommittedblobs    ListBlobsIncludeItem = "uncommittedblobs"
	ListBlobsIncludeItemVersions            ListBlobsIncludeItem = "versions"
)

// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type.
func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem {
	return []ListBlobsIncludeItem{
		ListBlobsIncludeItemCopy,
		ListBlobsIncludeItemDeleted,
		ListBlobsIncludeItemDeletedwithversions,
		ListBlobsIncludeItemImmutabilitypolicy,
		ListBlobsIncludeItemLegalhold,
		ListBlobsIncludeItemMetadata,
		ListBlobsIncludeItemSnapshots,
		ListBlobsIncludeItemTags,
		ListBlobsIncludeItemUncommittedblobs,
		ListBlobsIncludeItemVersions,
	}
}

// ListContainersIncludeType - extra datasets to include in a List Containers response.
type ListContainersIncludeType string

const (
	ListContainersIncludeTypeDeleted  ListContainersIncludeType = "deleted"
	ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata"
	ListContainersIncludeTypeSystem   ListContainersIncludeType = "system"
)

// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type.
func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType {
	return []ListContainersIncludeType{
		ListContainersIncludeTypeDeleted,
		ListContainersIncludeTypeMetadata,
		ListContainersIncludeTypeSystem,
	}
}

// PremiumPageBlobAccessTier - the premium (P*) access tiers for page blobs.
type PremiumPageBlobAccessTier string

const (
	PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = "P10"
	PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = "P15"
	PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = "P20"
	PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = "P30"
	PremiumPageBlobAccessTierP4  PremiumPageBlobAccessTier = "P4"
	PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = "P40"
	PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = "P50"
	PremiumPageBlobAccessTierP6  PremiumPageBlobAccessTier = "P6"
	PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = "P60"
	PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = "P70"
	PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = "P80"
)

// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type.
func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier {
	return []PremiumPageBlobAccessTier{
		PremiumPageBlobAccessTierP10,
		PremiumPageBlobAccessTierP15,
		PremiumPageBlobAccessTierP20,
		PremiumPageBlobAccessTierP30,
		PremiumPageBlobAccessTierP4,
		PremiumPageBlobAccessTierP40,
		PremiumPageBlobAccessTierP50,
		PremiumPageBlobAccessTierP6,
		PremiumPageBlobAccessTierP60,
		PremiumPageBlobAccessTierP70,
		PremiumPageBlobAccessTierP80,
	}
}

// PublicAccessType - public access level. Note the datalake-flavored names
// (File/FileSystem) map onto the underlying blob-service wire values
// ("blob"/"container").
type PublicAccessType string

const (
	PublicAccessTypeFile       PublicAccessType = "blob"
	PublicAccessTypeFileSystem PublicAccessType = "container"
)

// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type.
func PossiblePublicAccessTypeValues() []PublicAccessType {
	return []PublicAccessType{
		PublicAccessTypeFile,
		PublicAccessTypeFileSystem,
	}
}

// QueryFormatType - The quick query format type.
type QueryFormatType string

const (
	QueryFormatTypeArrow     QueryFormatType = "arrow"
	QueryFormatTypeDelimited QueryFormatType = "delimited"
	QueryFormatTypeJSON      QueryFormatType = "json"
	QueryFormatTypeParquet   QueryFormatType = "parquet"
)

// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type.
func PossibleQueryFormatTypeValues() []QueryFormatType {
	return []QueryFormatType{
		QueryFormatTypeArrow,
		QueryFormatTypeDelimited,
		QueryFormatTypeJSON,
		QueryFormatTypeParquet,
	}
}

// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate.
// Valid values are High and Standard.
type RehydratePriority string

const (
	RehydratePriorityHigh     RehydratePriority = "High"
	RehydratePriorityStandard RehydratePriority = "Standard"
)

// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type.
func PossibleRehydratePriorityValues() []RehydratePriority {
	return []RehydratePriority{
		RehydratePriorityHigh,
		RehydratePriorityStandard,
	}
}

// SKUName - the storage account SKU (replication) name.
type SKUName string

const (
	SKUNamePremiumLRS    SKUName = "Premium_LRS"
	SKUNameStandardGRS   SKUName = "Standard_GRS"
	SKUNameStandardLRS   SKUName = "Standard_LRS"
	SKUNameStandardRAGRS SKUName = "Standard_RAGRS"
	SKUNameStandardZRS   SKUName = "Standard_ZRS"
)

// PossibleSKUNameValues returns the possible values for the SKUName const type.
func PossibleSKUNameValues() []SKUName {
	return []SKUName{
		SKUNamePremiumLRS,
		SKUNameStandardGRS,
		SKUNameStandardLRS,
		SKUNameStandardRAGRS,
		SKUNameStandardZRS,
	}
}

// SequenceNumberActionType - how a page blob's sequence number is changed.
type SequenceNumberActionType string

const (
	SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment"
	SequenceNumberActionTypeMax       SequenceNumberActionType = "max"
	SequenceNumberActionTypeUpdate    SequenceNumberActionType = "update"
)

// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type.
func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType {
	return []SequenceNumberActionType{
		SequenceNumberActionTypeIncrement,
		SequenceNumberActionTypeMax,
		SequenceNumberActionTypeUpdate,
	}
}

// StorageErrorCode - Error codes returned by the service
type StorageErrorCode string

const (
	StorageErrorCodeAccountAlreadyExists                              StorageErrorCode = "AccountAlreadyExists"
	StorageErrorCodeAccountBeingCreated                               StorageErrorCode = "AccountBeingCreated"
	StorageErrorCodeAccountIsDisabled                                 StorageErrorCode = "AccountIsDisabled"
	StorageErrorCodeAppendPositionConditionNotMet                     StorageErrorCode = "AppendPositionConditionNotMet"
	StorageErrorCodeAuthenticationFailed                              StorageErrorCode = "AuthenticationFailed"
	StorageErrorCodeAuthorizationFailure                              StorageErrorCode = "AuthorizationFailure"
	StorageErrorCodeAuthorizationPermissionMismatch                   StorageErrorCode = "AuthorizationPermissionMismatch"
	StorageErrorCodeAuthorizationProtocolMismatch                     StorageErrorCode = "AuthorizationProtocolMismatch"
	StorageErrorCodeAuthorizationResourceTypeMismatch                 StorageErrorCode = "AuthorizationResourceTypeMismatch"
	StorageErrorCodeAuthorizationServiceMismatch                      StorageErrorCode = "AuthorizationServiceMismatch"
	StorageErrorCodeAuthorizationSourceIPMismatch                     StorageErrorCode = "AuthorizationSourceIPMismatch"
	StorageErrorCodeBlobAlreadyExists                                 StorageErrorCode = "BlobAlreadyExists"
	StorageErrorCodeBlobArchived                                      StorageErrorCode = "BlobArchived"
	StorageErrorCodeBlobBeingRehydrated                               StorageErrorCode = "BlobBeingRehydrated"
	StorageErrorCodeBlobImmutableDueToPolicy                          StorageErrorCode = "BlobImmutableDueToPolicy"
	StorageErrorCodeBlobNotArchived                                   StorageErrorCode = "BlobNotArchived"
	StorageErrorCodeBlobNotFound                                      StorageErrorCode = "BlobNotFound"
	StorageErrorCodeBlobOverwritten                                   StorageErrorCode = "BlobOverwritten"
	StorageErrorCodeBlobTierInadequateForContentLength                StorageErrorCode = "BlobTierInadequateForContentLength"
	StorageErrorCodeBlobUsesCustomerSpecifiedEncryption               StorageErrorCode = "BlobUsesCustomerSpecifiedEncryption"
	StorageErrorCodeBlockCountExceedsLimit                            StorageErrorCode = "BlockCountExceedsLimit"
	StorageErrorCodeBlockListTooLong                                  StorageErrorCode = "BlockListTooLong"
	StorageErrorCodeCannotChangeToLowerTier                           StorageErrorCode = "CannotChangeToLowerTier"
	StorageErrorCodeCannotVerifyCopySource                            StorageErrorCode = "CannotVerifyCopySource"
	StorageErrorCodeConditionHeadersNotSupported                      StorageErrorCode = "ConditionHeadersNotSupported"
	StorageErrorCodeConditionNotMet                                   StorageErrorCode = "ConditionNotMet"
	StorageErrorCodeContainerAlreadyExists                            StorageErrorCode = "ContainerAlreadyExists"
	StorageErrorCodeContainerBeingDeleted                             StorageErrorCode = "ContainerBeingDeleted"
	StorageErrorCodeContainerDisabled                                 StorageErrorCode = "ContainerDisabled"
	StorageErrorCodeContainerNotFound                                 StorageErrorCode = "ContainerNotFound"
	StorageErrorCodeContentLengthLargerThanTierLimit                  StorageErrorCode = "ContentLengthLargerThanTierLimit"
	StorageErrorCodeCopyAcrossAccountsNotSupported                    StorageErrorCode = "CopyAcrossAccountsNotSupported"
	StorageErrorCodeCopyIDMismatch                                    StorageErrorCode = "CopyIdMismatch"
	StorageErrorCodeEmptyMetadataKey                                  StorageErrorCode = "EmptyMetadataKey"
	StorageErrorCodeFeatureVersionMismatch                            StorageErrorCode = "FeatureVersionMismatch"
	StorageErrorCodeIncrementalCopyBlobMismatch                       StorageErrorCode = "IncrementalCopyBlobMismatch"
	StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
	StorageErrorCodeIncrementalCopySourceMustBeSnapshot               StorageErrorCode = "IncrementalCopySourceMustBeSnapshot"
	StorageErrorCodeInfiniteLeaseDurationRequired                     StorageErrorCode = "InfiniteLeaseDurationRequired"
	StorageErrorCodeInsufficientAccountPermissions                    StorageErrorCode = "InsufficientAccountPermissions"
	StorageErrorCodeInternalError                                     StorageErrorCode = "InternalError"
	StorageErrorCodeInvalidAuthenticationInfo                         StorageErrorCode = "InvalidAuthenticationInfo"
	StorageErrorCodeInvalidBlobOrBlock                                StorageErrorCode = "InvalidBlobOrBlock"
	StorageErrorCodeInvalidBlobTier                                   StorageErrorCode = "InvalidBlobTier"
	StorageErrorCodeInvalidBlobType                                   StorageErrorCode = "InvalidBlobType"
	StorageErrorCodeInvalidBlockID                                    StorageErrorCode = "InvalidBlockId"
	StorageErrorCodeInvalidBlockList                                  StorageErrorCode = "InvalidBlockList"
	StorageErrorCodeInvalidHTTPVerb                                   StorageErrorCode = "InvalidHttpVerb"
	StorageErrorCodeInvalidHeaderValue                                StorageErrorCode = "InvalidHeaderValue"
	StorageErrorCodeInvalidInput                                      StorageErrorCode = "InvalidInput"
	StorageErrorCodeInvalidMD5                                        StorageErrorCode = "InvalidMd5"
	StorageErrorCodeInvalidMetadata                                   StorageErrorCode = "InvalidMetadata"
	StorageErrorCodeInvalidOperation                                  StorageErrorCode = "InvalidOperation"
	StorageErrorCodeInvalidPageRange                                  StorageErrorCode = "InvalidPageRange"
	StorageErrorCodeInvalidQueryParameterValue                        StorageErrorCode = "InvalidQueryParameterValue"
	StorageErrorCodeInvalidRange                                      StorageErrorCode = "InvalidRange"
	StorageErrorCodeInvalidResourceName                               StorageErrorCode = "InvalidResourceName"
	StorageErrorCodeInvalidSourceBlobType                             StorageErrorCode = "InvalidSourceBlobType"
	StorageErrorCodeInvalidSourceBlobURL                              StorageErrorCode = "InvalidSourceBlobUrl"
	StorageErrorCodeInvalidURI                                        StorageErrorCode = "InvalidUri"
	StorageErrorCodeInvalidVersionForPageBlobOperation                StorageErrorCode = "InvalidVersionForPageBlobOperation"
	StorageErrorCodeInvalidXMLDocument                                StorageErrorCode = "InvalidXmlDocument"
	StorageErrorCodeInvalidXMLNodeValue                               StorageErrorCode = "InvalidXmlNodeValue"
	StorageErrorCodeLeaseAlreadyBroken                                StorageErrorCode = "LeaseAlreadyBroken"
	StorageErrorCodeLeaseAlreadyPresent                               StorageErrorCode = "LeaseAlreadyPresent"
	StorageErrorCodeLeaseIDMismatchWithBlobOperation                  StorageErrorCode = "LeaseIdMismatchWithBlobOperation"
	StorageErrorCodeLeaseIDMismatchWithContainerOperation             StorageErrorCode = "LeaseIdMismatchWithContainerOperation"
	StorageErrorCodeLeaseIDMismatchWithLeaseOperation                 StorageErrorCode = "LeaseIdMismatchWithLeaseOperation"
	StorageErrorCodeLeaseIDMissing                                    StorageErrorCode = "LeaseIdMissing"
	StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired                StorageErrorCode = "LeaseIsBreakingAndCannotBeAcquired"
	StorageErrorCodeLeaseIsBreakingAndCannotBeChanged                 StorageErrorCode = "LeaseIsBreakingAndCannotBeChanged"
	StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed                   StorageErrorCode = "LeaseIsBrokenAndCannotBeRenewed"
	StorageErrorCodeLeaseLost                                         StorageErrorCode = "LeaseLost"
	StorageErrorCodeLeaseNotPresentWithBlobOperation                  StorageErrorCode = "LeaseNotPresentWithBlobOperation"
	StorageErrorCodeLeaseNotPresentWithContainerOperation             StorageErrorCode = "LeaseNotPresentWithContainerOperation"
	StorageErrorCodeLeaseNotPresentWithLeaseOperation                 StorageErrorCode = "LeaseNotPresentWithLeaseOperation"
	StorageErrorCodeMD5Mismatch                                       StorageErrorCode = "Md5Mismatch"
	StorageErrorCodeMaxBlobSizeConditionNotMet                        StorageErrorCode = "MaxBlobSizeConditionNotMet"
	StorageErrorCodeMetadataTooLarge                                  StorageErrorCode = "MetadataTooLarge"
	StorageErrorCodeMissingContentLengthHeader                        StorageErrorCode = "MissingContentLengthHeader"
	StorageErrorCodeMissingRequiredHeader                             StorageErrorCode = "MissingRequiredHeader"
	StorageErrorCodeMissingRequiredQueryParameter                     StorageErrorCode = "MissingRequiredQueryParameter"
	StorageErrorCodeMissingRequiredXMLNode                            StorageErrorCode = "MissingRequiredXmlNode"
	StorageErrorCodeMultipleConditionHeadersNotSupported              StorageErrorCode = "MultipleConditionHeadersNotSupported"
	StorageErrorCodeNoAuthenticationInformation                       StorageErrorCode = "NoAuthenticationInformation"
	StorageErrorCodeNoPendingCopyOperation                            StorageErrorCode = "NoPendingCopyOperation"
	StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob          StorageErrorCode = "OperationNotAllowedOnIncrementalCopyBlob"
	StorageErrorCodeOperationTimedOut                                 StorageErrorCode = "OperationTimedOut"
	StorageErrorCodeOutOfRangeInput                                   StorageErrorCode = "OutOfRangeInput"
	StorageErrorCodeOutOfRangeQueryParameterValue                     StorageErrorCode = "OutOfRangeQueryParameterValue"
	StorageErrorCodePendingCopyOperation                              StorageErrorCode = "PendingCopyOperation"
	StorageErrorCodePreviousSnapshotCannotBeNewer                     StorageErrorCode = "PreviousSnapshotCannotBeNewer"
	StorageErrorCodePreviousSnapshotNotFound                          StorageErrorCode = "PreviousSnapshotNotFound"
	StorageErrorCodePreviousSnapshotOperationNotSupported             StorageErrorCode = "PreviousSnapshotOperationNotSupported"
	StorageErrorCodeRequestBodyTooLarge                               StorageErrorCode = "RequestBodyTooLarge"
	StorageErrorCodeRequestURLFailedToParse                           StorageErrorCode = "RequestUrlFailedToParse"
	StorageErrorCodeResourceAlreadyExists                             StorageErrorCode = "ResourceAlreadyExists"
	StorageErrorCodeResourceNotFound                                  StorageErrorCode = "ResourceNotFound"
	StorageErrorCodeResourceTypeMismatch                              StorageErrorCode = "ResourceTypeMismatch"
	StorageErrorCodeSequenceNumberConditionNotMet                     StorageErrorCode = "SequenceNumberConditionNotMet"
	StorageErrorCodeSequenceNumberIncrementTooLarge                   StorageErrorCode = "SequenceNumberIncrementTooLarge"
	StorageErrorCodeServerBusy                                        StorageErrorCode = "ServerBusy"
	StorageErrorCodeSnapshotCountExceeded                             StorageErrorCode = "SnapshotCountExceeded"
	StorageErrorCodeSnapshotOperationRateExceeded                     StorageErrorCode = "SnapshotOperationRateExceeded"
	StorageErrorCodeSnapshotsPresent                                  StorageErrorCode = "SnapshotsPresent"
	StorageErrorCodeSourceConditionNotMet                             StorageErrorCode = "SourceConditionNotMet"
	StorageErrorCodeSystemInUse                                       StorageErrorCode = "SystemInUse"
	StorageErrorCodeTargetConditionNotMet                             StorageErrorCode = "TargetConditionNotMet"
	StorageErrorCodeUnauthorizedBlobOverwrite                         StorageErrorCode = "UnauthorizedBlobOverwrite"
	StorageErrorCodeUnsupportedHTTPVerb                               StorageErrorCode = "UnsupportedHttpVerb"
	StorageErrorCodeUnsupportedHeader                                 StorageErrorCode = "UnsupportedHeader"
	StorageErrorCodeUnsupportedQueryParameter                         StorageErrorCode = "UnsupportedQueryParameter"
	StorageErrorCodeUnsupportedXMLNode                                StorageErrorCode = "UnsupportedXmlNode"
)

// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type.
// NOTE(review): the remainder of this generated function continues beyond this chunk.
func PossibleStorageErrorCodeValues() []StorageErrorCode {
	return []StorageErrorCode{
		StorageErrorCodeAccountAlreadyExists,
		StorageErrorCodeAccountBeingCreated,
		StorageErrorCodeAccountIsDisabled,
		StorageErrorCodeAppendPositionConditionNotMet,
		StorageErrorCodeAuthenticationFailed,
		StorageErrorCodeAuthorizationFailure,
		StorageErrorCodeAuthorizationPermissionMismatch,
		StorageErrorCodeAuthorizationProtocolMismatch,
		StorageErrorCodeAuthorizationResourceTypeMismatch,
		StorageErrorCodeAuthorizationServiceMismatch,
		StorageErrorCodeAuthorizationSourceIPMismatch,
		StorageErrorCodeBlobAlreadyExists,
		StorageErrorCodeBlobArchived,
		StorageErrorCodeBlobBeingRehydrated,
		StorageErrorCodeBlobImmutableDueToPolicy,
		StorageErrorCodeBlobNotArchived,
		StorageErrorCodeBlobNotFound,
		StorageErrorCodeBlobOverwritten,
		StorageErrorCodeBlobTierInadequateForContentLength,
		StorageErrorCodeBlobUsesCustomerSpecifiedEncryption,
		StorageErrorCodeBlockCountExceedsLimit,
		StorageErrorCodeBlockListTooLong,
		StorageErrorCodeCannotChangeToLowerTier,
		StorageErrorCodeCannotVerifyCopySource,
		StorageErrorCodeConditionHeadersNotSupported,
		StorageErrorCodeConditionNotMet,
		StorageErrorCodeContainerAlreadyExists,
		StorageErrorCodeContainerBeingDeleted,
		StorageErrorCodeContainerDisabled,
		StorageErrorCodeContainerNotFound,
		StorageErrorCodeContentLengthLargerThanTierLimit,
		StorageErrorCodeCopyAcrossAccountsNotSupported,
		StorageErrorCodeCopyIDMismatch,
		StorageErrorCodeEmptyMetadataKey,
		StorageErrorCodeFeatureVersionMismatch,
		StorageErrorCodeIncrementalCopyBlobMismatch,
		StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed,
StorageErrorCodeIncrementalCopySourceMustBeSnapshot, + StorageErrorCodeInfiniteLeaseDurationRequired, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidBlobOrBlock, + StorageErrorCodeInvalidBlobTier, + StorageErrorCodeInvalidBlobType, + StorageErrorCodeInvalidBlockID, + StorageErrorCodeInvalidBlockList, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, + StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidOperation, + StorageErrorCodeInvalidPageRange, + StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidSourceBlobType, + StorageErrorCodeInvalidSourceBlobURL, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidVersionForPageBlobOperation, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeLeaseAlreadyBroken, + StorageErrorCodeLeaseAlreadyPresent, + StorageErrorCodeLeaseIDMismatchWithBlobOperation, + StorageErrorCodeLeaseIDMismatchWithContainerOperation, + StorageErrorCodeLeaseIDMismatchWithLeaseOperation, + StorageErrorCodeLeaseIDMissing, + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, + StorageErrorCodeLeaseLost, + StorageErrorCodeLeaseNotPresentWithBlobOperation, + StorageErrorCodeLeaseNotPresentWithContainerOperation, + StorageErrorCodeLeaseNotPresentWithLeaseOperation, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMaxBlobSizeConditionNotMet, + StorageErrorCodeMetadataTooLarge, + StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + 
StorageErrorCodeNoAuthenticationInformation, + StorageErrorCodeNoPendingCopyOperation, + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodePendingCopyOperation, + StorageErrorCodePreviousSnapshotCannotBeNewer, + StorageErrorCodePreviousSnapshotNotFound, + StorageErrorCodePreviousSnapshotOperationNotSupported, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeSequenceNumberConditionNotMet, + StorageErrorCodeSequenceNumberIncrementTooLarge, + StorageErrorCodeServerBusy, + StorageErrorCodeSnapshotCountExceeded, + StorageErrorCodeSnapshotOperationRateExceeded, + StorageErrorCodeSnapshotsPresent, + StorageErrorCodeSourceConditionNotMet, + StorageErrorCodeSystemInUse, + StorageErrorCodeTargetConditionNotMet, + StorageErrorCodeUnauthorizedBlobOverwrite, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_container_client.go b/sdk/storage/azdatalake/internal/generated_blob/zz_container_client.go new file mode 100644 index 000000000000..3f34ec1a47c4 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/zz_container_client.go @@ -0,0 +1,1473 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated_blob + +import ( + "context" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// ContainerClient contains the methods for the Container group. +// Don't use this type directly, use a constructor function instead. +type ContainerClient struct { + internal *azcore.Client + endpoint string +} + +// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + 
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. +func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { + result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. 
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. +func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + 
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { + result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. 
+func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { + result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Create - creates a new container under the specified account. If the container with the same name already exists, the operation +// fails +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. 
+func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) + if err != nil { + return ContainerClientCreateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil { + req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope} + } + if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != 
nil { + req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { + result := ContainerClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later +// deleted during garbage collection +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = 
[]string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (ContainerClientDeleteResponse, error) { + result := ContainerClientDeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may +// be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.getAccessPolicyHandleResponse(resp) +} + +// getAccessPolicyCreateRequest creates the GetAccessPolicy request. +func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccessPolicyHandleResponse handles the GetAccessPolicy response. 
+func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response) (ContainerClientGetAccessPolicyResponse, error) { + result := ContainerClientGetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo +// method. 
func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) {
	req, err := client.getAccountInfoCreateRequest(ctx, options)
	if err != nil {
		return ContainerClientGetAccountInfoResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ContainerClientGetAccountInfoResponse{}, err
	}
	// Only 200 OK is a success for this operation.
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
	}
	return client.getAccountInfoHandleResponse(resp)
}

// getAccountInfoCreateRequest creates the GetAccountInfo request.
// It targets GET <endpoint>?restype=account&comp=properties; this operation
// takes no lease/timeout decorations.
func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "account")
	reqQP.Set("comp", "properties")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// getAccountInfoHandleResponse handles the GetAccountInfo response.
// getAccountInfoHandleResponse extracts the account metadata (SKU name, account
// kind) plus the standard bookkeeping headers from the response; there is no body.
func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) {
	result := ContainerClientGetAccountInfoResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientGetAccountInfoResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-sku-name"); val != "" {
		result.SKUName = (*SKUName)(&val)
	}
	if val := resp.Header.Get("x-ms-account-kind"); val != "" {
		result.AccountKind = (*AccountKind)(&val)
	}
	return result, nil
}

// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned
// does not include the container's list of blobs
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
//   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) {
	req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions)
	if err != nil {
		return ContainerClientGetPropertiesResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ContainerClientGetPropertiesResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
	}
	return client.getPropertiesHandleResponse(resp)
}

// getPropertiesCreateRequest creates the GetProperties request.
// It targets GET <endpoint>?restype=container (no comp parameter) with optional
// timeout, lease, and client-request-id decorations.
func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "container")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// getPropertiesHandleResponse handles the GetProperties response.
// getPropertiesHandleResponse maps every container property the service returns
// (user metadata, lease state, public-access level, immutability/encryption
// settings, and the standard bookkeeping headers) into the result struct.
func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) {
	result := ContainerClientGetPropertiesResponse{}
	// User-defined metadata arrives as x-ms-meta-* headers; strip the prefix and
	// collect the pairs. The comparison is case-insensitive per HTTP header rules.
	for hh := range resp.Header {
		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
			if result.Metadata == nil {
				result.Metadata = map[string]*string{}
			}
			result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh))
		}
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientGetPropertiesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
		result.LeaseDuration = (*LeaseDurationType)(&val)
	}
	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
		result.LeaseState = (*LeaseStateType)(&val)
	}
	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
		result.LeaseStatus = (*LeaseStatusType)(&val)
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientGetPropertiesResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-blob-public-access"); val != "" {
		result.BlobPublicAccess = (*PublicAccessType)(&val)
	}
	// The following headers are booleans encoded as "true"/"false" strings.
	if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" {
		hasImmutabilityPolicy, err := strconv.ParseBool(val)
		if err != nil {
			return ContainerClientGetPropertiesResponse{}, err
		}
		result.HasImmutabilityPolicy = &hasImmutabilityPolicy
	}
	if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" {
		hasLegalHold, err := strconv.ParseBool(val)
		if err != nil {
			return ContainerClientGetPropertiesResponse{}, err
		}
		result.HasLegalHold = &hasLegalHold
	}
	if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" {
		result.DefaultEncryptionScope = &val
	}
	if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" {
		denyEncryptionScopeOverride, err := strconv.ParseBool(val)
		if err != nil {
			return ContainerClientGetPropertiesResponse{}, err
		}
		result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride
	}
	if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" {
		isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val)
		if err != nil {
			return ContainerClientGetPropertiesResponse{}, err
		}
		result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled
	}
	return result, nil
}

// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container
//
// Generated from API version 2020-10-02
//   - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager
//     method.
//
// NOTE(review): the flat-segment pager itself is not defined here — only the
// exported create/handle helpers below. Presumably the pager is hand-written in
// a customized client; confirm before regenerating.
//
// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request.
// ListBlobFlatSegmentCreateRequest builds the GET <endpoint>?restype=container&comp=list
// request, applying the optional prefix/marker/maxresults/include/timeout query
// parameters. Exported (unlike most create-request helpers) so a hand-written
// pager can reuse it.
func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "container")
	reqQP.Set("comp", "list")
	if options != nil && options.Prefix != nil {
		reqQP.Set("prefix", *options.Prefix)
	}
	if options != nil && options.Marker != nil {
		reqQP.Set("marker", *options.Marker)
	}
	if options != nil && options.Maxresults != nil {
		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
	}
	if options != nil && options.Include != nil {
		// Render the []ListBlobsIncludeItem slice as a comma-separated list,
		// e.g. [a b c] -> "a,b,c" (generated-code idiom).
		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response.
// ListBlobFlatSegmentHandleResponse copies the bookkeeping headers and decodes
// the XML listing body into result.ListBlobsFlatSegmentResponse. Exported for
// reuse by a hand-written pager.
func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) {
	result := ContainerClientListBlobFlatSegmentResponse{}
	if val := resp.Header.Get("Content-Type"); val != "" {
		result.ContentType = &val
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientListBlobFlatSegmentResponse{}, err
		}
		result.Date = &date
	}
	if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil {
		return ContainerClientListBlobFlatSegmentResponse{}, err
	}
	return result, nil
}

// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container
//
// Generated from API version 2020-10-02
//   - delimiter - When the request includes this parameter, the operation returns a PathPrefix element in the response body that
//     acts as a placeholder for all blobs whose names begin with the same substring up to the
//     appearance of the delimiter character. The delimiter may be a single character or a string.
//   - options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager
//     method.
func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[FileSystemClientListPathHierarchySegmentResponse] {
	return runtime.NewPager(runtime.PagingHandler[FileSystemClientListPathHierarchySegmentResponse]{
		// More: paging continues while the service returned a non-empty NextMarker.
		More: func(page FileSystemClientListPathHierarchySegmentResponse) bool {
			return page.NextMarker != nil && len(*page.NextMarker) > 0
		},
		Fetcher: func(ctx context.Context, page *FileSystemClientListPathHierarchySegmentResponse) (FileSystemClientListPathHierarchySegmentResponse, error) {
			var req *policy.Request
			var err error
			if page == nil {
				// First page: build the full list request from the options.
				req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options)
			} else {
				// NOTE(review): subsequent pages issue a GET against *page.NextMarker
				// as if it were a URL. NextMarker is normally a continuation token,
				// not a link — confirm this matches the service contract (a
				// marker-based re-request via the create-request helper may be
				// intended instead).
				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker)
			}
			if err != nil {
				return FileSystemClientListPathHierarchySegmentResponse{}, err
			}
			resp, err := client.internal.Pipeline().Do(req)
			if err != nil {
				return FileSystemClientListPathHierarchySegmentResponse{}, err
			}
			if !runtime.HasStatusCode(resp, http.StatusOK) {
				return FileSystemClientListPathHierarchySegmentResponse{}, runtime.NewResponseError(resp)
			}
			return client.ListBlobHierarchySegmentHandleResponse(resp)
		},
	})
}

// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request.
// ListBlobHierarchySegmentCreateRequest builds the GET
// <endpoint>?restype=container&comp=list&delimiter=<delimiter> request with the
// optional prefix/marker/maxresults/include/timeout query parameters.
func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "container")
	reqQP.Set("comp", "list")
	if options != nil && options.Prefix != nil {
		reqQP.Set("prefix", *options.Prefix)
	}
	// delimiter is required for the hierarchy listing, hence unconditional.
	reqQP.Set("delimiter", delimiter)
	if options != nil && options.Marker != nil {
		reqQP.Set("marker", *options.Marker)
	}
	if options != nil && options.Maxresults != nil {
		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
	}
	if options != nil && options.Include != nil {
		// Render the include slice as a comma-separated list (generated-code idiom).
		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response.
// ListBlobHierarchySegmentHandleResponse copies the bookkeeping headers and
// decodes the XML listing into result.ListPathsHierarchySegmentResponse.
// Note the datalake-flavored response type (FileSystemClient...), a local
// customization of the blob-generated code.
func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (FileSystemClientListPathHierarchySegmentResponse, error) {
	result := FileSystemClientListPathHierarchySegmentResponse{}
	if val := resp.Header.Get("Content-Type"); val != "" {
		result.ContentType = &val
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileSystemClientListPathHierarchySegmentResponse{}, err
		}
		result.Date = &date
	}
	if err := runtime.UnmarshalAsXML(resp, &result.ListPathsHierarchySegmentResponse); err != nil {
		return FileSystemClientListPathHierarchySegmentResponse{}, err
	}
	return result, nil
}

// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - leaseID - Specifies the current lease ID on the resource.
//   - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
//   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) {
	req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions)
	if err != nil {
		return ContainerClientReleaseLeaseResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ContainerClientReleaseLeaseResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp)
	}
	return client.releaseLeaseHandleResponse(resp)
}

// releaseLeaseCreateRequest creates the ReleaseLease request.
// PUT <endpoint>?comp=lease&restype=container with x-ms-lease-action=release;
// optional If-(Un)Modified-Since conditions are sent in GMT/RFC1123 form.
func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "lease")
	reqQP.Set("restype", "container")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-lease-action"] = []string{"release"}
	req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// releaseLeaseHandleResponse handles the ReleaseLease response.
// Only headers are returned; there is no body to decode.
func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) {
	result := ContainerClientReleaseLeaseResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientReleaseLeaseResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientReleaseLeaseResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// Rename - Renames an existing container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - sourceFileSystemName - Required. Specifies the name of the container to rename.
//   - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method.
func (client *ContainerClient) Rename(ctx context.Context, sourceFileSystemName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) {
	req, err := client.renameCreateRequest(ctx, sourceFileSystemName, options)
	if err != nil {
		return ContainerClientRenameResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ContainerClientRenameResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ContainerClientRenameResponse{}, runtime.NewResponseError(resp)
	}
	return client.renameHandleResponse(resp)
}

// renameCreateRequest creates the Rename request.
// PUT <endpoint>?restype=container&comp=rename; the source container name goes
// in x-ms-source-container-name, with an optional x-ms-source-lease-id.
func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceFileSystemName string, options *ContainerClientRenameOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "container")
	reqQP.Set("comp", "rename")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["x-ms-source-container-name"] = []string{sourceFileSystemName}
	if options != nil && options.SourceLeaseID != nil {
		req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// renameHandleResponse handles the Rename response.
// renameHandleResponse extracts the standard bookkeeping headers from the
// Rename response; there is no body to decode.
func (client *ContainerClient) renameHandleResponse(resp *http.Response) (ContainerClientRenameResponse, error) {
	result := ContainerClientRenameResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientRenameResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - leaseID - Specifies the current lease ID on the resource.
//   - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
//   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) {
	req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions)
	if err != nil {
		return ContainerClientRenewLeaseResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ContainerClientRenewLeaseResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp)
	}
	return client.renewLeaseHandleResponse(resp)
}

// renewLeaseCreateRequest creates the RenewLease request.
// renewLeaseCreateRequest creates the RenewLease request.
// PUT <endpoint>?comp=lease&restype=container with x-ms-lease-action=renew;
// optional If-(Un)Modified-Since conditions are sent in GMT/RFC1123 form.
func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "lease")
	reqQP.Set("restype", "container")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
	req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// renewLeaseHandleResponse handles the RenewLease response.
// renewLeaseHandleResponse extracts the renewed lease ID plus the standard
// bookkeeping headers; there is no body to decode.
func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) {
	result := ContainerClientRenewLeaseResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientRenewLeaseResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
		result.LeaseID = &val
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientRenewLeaseResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// Restore - Restores a previously-deleted container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) {
	req, err := client.restoreCreateRequest(ctx, options)
	if err != nil {
		return ContainerClientRestoreResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ContainerClientRestoreResponse{}, err
	}
	// Restore succeeds with 201 Created (not 200), unlike most container operations.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp)
	}
	return client.restoreHandleResponse(resp)
}

// restoreCreateRequest creates the Restore request.
// restoreCreateRequest creates the Restore request.
// PUT <endpoint>?restype=container&comp=undelete; the deleted container's name
// and version are passed via x-ms-deleted-container-* headers.
func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options *ContainerClientRestoreOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "container")
	reqQP.Set("comp", "undelete")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.DeletedFileSystemName != nil {
		req.Raw().Header["x-ms-deleted-container-name"] = []string{*options.DeletedFileSystemName}
	}
	if options != nil && options.DeletedContainerVersion != nil {
		req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// restoreHandleResponse handles the Restore response.
// Only the standard bookkeeping headers are returned; there is no body.
func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (ContainerClientRestoreResponse, error) {
	result := ContainerClientRestoreResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientRestoreResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container
// may be accessed publicly.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - containerACL - the acls for the container
//   - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy
//     method.
//   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
//   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) {
	req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions)
	if err != nil {
		return ContainerClientSetAccessPolicyResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ContainerClientSetAccessPolicyResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp)
	}
	return client.setAccessPolicyHandleResponse(resp)
}

// setAccessPolicyCreateRequest creates the SetAccessPolicy request.
// setAccessPolicyCreateRequest creates the SetAccessPolicy request.
// PUT <endpoint>?restype=container&comp=acl; the ACL list is wrapped in a
// <SignedIdentifiers> XML element and marshaled into the request body.
func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "container")
	reqQP.Set("comp", "acl")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if options != nil && options.Access != nil {
		req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// Wrap the slice so the body serializes as
	// <SignedIdentifiers><SignedIdentifier>...</SignedIdentifier></SignedIdentifiers>.
	type wrapper struct {
		XMLName      xml.Name             `xml:"SignedIdentifiers"`
		ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
	}
	if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil {
		return nil, err
	}
	return req, nil
}

// setAccessPolicyHandleResponse handles the SetAccessPolicy response.
// setAccessPolicyHandleResponse extracts the standard bookkeeping headers from
// the SetAccessPolicy response; there is no body to decode.
func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) {
	result := ContainerClientSetAccessPolicyResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientSetAccessPolicyResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ContainerClientSetAccessPolicyResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
//   - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method.
//   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
//   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) {
+	req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
+	if err != nil {
+		return ContainerClientSetMetadataResponse{}, err
+	}
+	resp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return ContainerClientSetMetadataResponse{}, err
+	}
+	// 200 OK is the only success status for Set Container Metadata; anything else
+	// is surfaced as an *azcore.ResponseError.
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.setMetadataHandleResponse(resp)
+}
+
+// setMetadataCreateRequest creates the SetMetadata request.
+func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	// restype=container&comp=metadata routes this PUT to the Set Container Metadata operation.
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "container")
+	reqQP.Set("comp", "metadata")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	// Each metadata pair becomes an x-ms-meta-<name> header; nil-valued entries are skipped.
+	if options != nil && options.Metadata != nil {
+		for k, v := range options.Metadata {
+			if v != nil {
+				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+			}
+		}
+	}
+	// Note: only the If-Modified-Since precondition is sent for this operation;
+	// no If-Unmodified-Since header is applied here.
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+	}
+
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// setMetadataHandleResponse handles the SetMetadata response.
+func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) {
+	result := ContainerClientSetMetadataResponse{}
+	// Each response header is copied into the typed result only when present.
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientSetMetadataResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientSetMetadataResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+//   - contentLength - The length of the request.
+//   - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
+//     value: multipart/mixed; boundary=batch_
+//   - body - Initial data
+//   - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method.
+func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) {
+	req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options)
+	if err != nil {
+		return ContainerClientSubmitBatchResponse{}, err
+	}
+	resp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return ContainerClientSubmitBatchResponse{}, err
+	}
+	// 202 Accepted is the only success status for a container-scoped batch request.
+	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
+		return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.submitBatchHandleResponse(resp)
+}
+
+// submitBatchCreateRequest creates the SubmitBatch request.
+func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (*policy.Request, error) {
+	// Batch is the only container operation issued as a POST.
+	req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "container")
+	reqQP.Set("comp", "batch")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	// The multipart response is streamed back to the caller, so tell the pipeline
+	// not to buffer/download the response body.
+	runtime.SkipBodyDownload(req)
+	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
+	req.Raw().Header["Content-Type"] = []string{multipartContentType}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	if err := req.SetBody(body, multipartContentType); err != nil {
+		return nil, err
+	}
+	return req, nil
+}
+
+// submitBatchHandleResponse handles the SubmitBatch response.
+func (client *ContainerClient) submitBatchHandleResponse(resp *http.Response) (ContainerClientSubmitBatchResponse, error) {
+	// The multipart batch body is passed through unread (see SkipBodyDownload in
+	// submitBatchCreateRequest); the caller consumes — and presumably closes — it.
+	result := ContainerClientSubmitBatchResponse{Body: resp.Body}
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	return result, nil
+}
diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_models.go b/sdk/storage/azdatalake/internal/generated_blob/zz_models.go
new file mode 100644
index 000000000000..c07e6bdb3b39
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated_blob/zz_models.go
@@ -0,0 +1,1733 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+// DO NOT EDIT.
+
+package generated_blob
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"time"
+)
+
+// AccessPolicy - An Access policy
+type AccessPolicy struct {
+	// the date-time the policy expires
+	Expiry *time.Time `xml:"Expiry"`
+
+	// the permissions for the acl policy
+	Permission *string `xml:"Permission"`
+
+	// the date-time the policy is active
+	Start *time.Time `xml:"Start"`
+}
+
+// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
+// method.
+type AppendBlobClientAppendBlockFromURLOptions struct {
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. 
+ BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. +type ArrowConfiguration struct { + // REQUIRED + Schema []*ArrowField `xml:"Schema>Field"` +} + +// ArrowField - Groups settings regarding specific field of an arrow schema +type ArrowField struct { + // REQUIRED + Type *string `xml:"Type"` + Name *string `xml:"Name"` + Precision *int32 `xml:"Precision"` + Scale *int32 `xml:"Scale"` +} + +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. 
Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + DeleteType *DeleteType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. 
+ VersionID *string +} + +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. 
+type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +type BlobFlatListSegment struct { + // REQUIRED + PathItems []*PathItem `xml:"Blob"` +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. 
Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+ BlobCacheControl *string
+ // Optional. Sets the blob's Content-Disposition header.
+ BlobContentDisposition *string
+ // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read
+ // request.
+ BlobContentEncoding *string
+ // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read
+ // request.
+ BlobContentLanguage *string
+ // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks
+ // were validated when each was uploaded.
+ BlobContentMD5 []byte
+ // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+ BlobContentType *string
+}
+
+type PathHierarchyListSegment struct {
+ // REQUIRED
+ PathItems []*PathItem `xml:"Blob"`
+ PathPrefixes []*PathPrefix `xml:"PathPrefix"`
+}
+
+// PathItem - An Azure Storage path
+type PathItem struct {
+ // REQUIRED
+ Deleted *bool `xml:"Deleted"`
+
+ // REQUIRED
+ Name *string `xml:"Name"`
+
+ // REQUIRED; Properties of a path
+ Properties *PathProperties `xml:"Properties"`
+
+ // REQUIRED
+ Snapshot *string `xml:"Snapshot"`
+
+ // Blob tags
+ BlobTags *BlobTags `xml:"Tags"`
+ HasVersionsOnly *bool `xml:"HasVersionsOnly"`
+ IsCurrentVersion *bool `xml:"IsCurrentVersion"`
+
+ // Dictionary of
+ Metadata map[string]*string `xml:"Metadata"`
+
+ // Dictionary of
+ OrMetadata map[string]*string `xml:"OrMetadata"`
+ VersionID *string `xml:"VersionId"`
+}
+
+type PathPrefix struct {
+ // REQUIRED
+ Name *string `xml:"Name"`
+}
+
+// PathProperties - Properties of a path
+type PathProperties struct {
+ // REQUIRED
+ ETag *azcore.ETag `xml:"Etag"`
+
+ // REQUIRED
+ LastModified *time.Time `xml:"Last-Modified"`
+ AccessTier *AccessTier `xml:"AccessTier"`
+ 
AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus *ArchiveStatus `xml:"ArchiveStatus"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType *BlobType `xml:"BlobType"` + CacheControl *string `xml:"Cache-Control"` + ContentDisposition *string `xml:"Content-Disposition"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + + // Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentType *string `xml:"Content-Type"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyID *string `xml:"CopyId"` + CopyProgress *string `xml:"CopyProgress"` + CopySource *string `xml:"CopySource"` + CopyStatus *CopyStatusType `xml:"CopyStatus"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + CreationTime *time.Time `xml:"Creation-Time"` + CustomerProvidedKeySHA256 *string `xml:"CustomerProvidedKeySha256"` + DeletedTime *time.Time `xml:"DeletedTime"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + + // The name of the encryption scope under which the blob is encrypted. + EncryptionScope *string `xml:"EncryptionScope"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + IsSealed *bool `xml:"Sealed"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + LegalHold *bool `xml:"LegalHold"` + + // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High + // and Standard. 
+ RehydratePriority *RehydratePriority `xml:"RehydratePriority"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + TagCount *int32 `xml:"TagCount"` +} + +type BlobTag struct { + // REQUIRED + Key *string `xml:"Key"` + + // REQUIRED + Value *string `xml:"Value"` +} + +// BlobTags - Blob tags +type BlobTags struct { + // REQUIRED + BlobTagSet []*BlobTag `xml:"TagSet>Tag"` +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // REQUIRED; The base64 encoded block ID. + Name *string `xml:"Name"` + + // REQUIRED; The block size in bytes. + Size *int64 `xml:"Size"` +} + +// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. +type BlockBlobClientCommitBlockListOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. 
+ Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +type BlockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. +type BlockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. 
+ BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourcePathProperties *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
+ CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +type BlockBlobClientStageBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +type BlockBlobClientUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. 
+ ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. 
+ TransactionalContentMD5 []byte +} + +type BlockList struct { + CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` +} + +type BlockLookupList struct { + Committed []*string `xml:"Committed"` + Latest []*string `xml:"Latest"` + Uncommitted []*string `xml:"Uncommitted"` +} + +type ClearRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +type ContainerClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +type ContainerClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. 
If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +type ContainerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. 
Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. +type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. 
If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. 
If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string
+ // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match.
+ SourceLeaseID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
+type ContainerClientRenewLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
+type ContainerClientRestoreOptions struct {
+ // Optional. Version 2019-12-12 and later. Specifies the name of the deleted filesystem to restore.
+ DeletedFileSystemName *string
+ // Optional. Version 2019-12-12 and later. Specifies the version of the deleted filesystem to restore.
+ DeletedContainerVersion *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. 
+ PreventEncryptionScopeOverride *bool +} + +// FileSystemItem - An Azure Storage container +type FileSystemItem struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a container + Properties *FileSystemProperties `xml:"Properties"` + Deleted *bool `xml:"Deleted"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + Version *string `xml:"Version"` +} + +// FileSystemProperties - Properties of a container +type FileSystemProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + DeletedTime *time.Time `xml:"DeletedTime"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + + // Indicates if version level worm is enabled on this container. + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + PublicAccess *PublicAccessType `xml:"PublicAccess"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` +} + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CORSRule struct { + // REQUIRED; the request headers that the origin domain may specify on the CORS request. + AllowedHeaders *string `xml:"AllowedHeaders"` + + // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. 
(comma separated)
+	AllowedMethods *string `xml:"AllowedMethods"`
+
+	// REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain
+	// is the domain from which the request originates. Note that the origin must be an exact
+	// case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*'
+	// to allow all origin domains to make requests via CORS.
+	AllowedOrigins *string `xml:"AllowedOrigins"`
+
+	// REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request
+	// issuer
+	ExposedHeaders *string `xml:"ExposedHeaders"`
+
+	// REQUIRED; The maximum amount of time that a browser should cache the preflight OPTIONS request.
+	MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"`
+}
+
+// CPKInfo contains a group of parameters for the BlobClient.Download method.
+type CPKInfo struct {
+	// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+	// if the x-ms-encryption-key header is provided.
+	EncryptionAlgorithm *EncryptionAlgorithmType
+	// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
+	// is performed with the root account encryption key. For more information, see
+	// Encryption at Rest for Azure Storage Services.
+	EncryptionKey *string
+	// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
+	EncryptionKeySHA256 *string
+}
+
+// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+type CPKScopeInfo struct {
+	// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided
+	// in the request. If not specified, encryption is performed with the default
+	// account encryption scope. 
For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. +type DelimitedTextConfiguration struct { + // The string used to separate columns. + ColumnSeparator *string `xml:"ColumnSeparator"` + + // The string used as an escape character. + EscapeChar *string `xml:"EscapeChar"` + + // The string used to quote a specific field. + FieldQuote *string `xml:"FieldQuote"` + + // Represents whether the data has headers. + HeadersPresent *bool `xml:"HasHeaders"` + + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// FilterPathItem - Blob info from a Filter Blobs API call +type FilterPathItem struct { + // REQUIRED + FileSystemName *string `xml:"ContainerName"` + + // REQUIRED + Name *string `xml:"Name"` + + // Blob tags + Tags *BlobTags `xml:"Tags"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + // REQUIRED + Blobs []*FilterPathItem `xml:"Blobs>Blob"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + Where *string `xml:"Where"` + NextMarker *string `xml:"NextMarker"` +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *BlobGeoReplicationStatus `xml:"Status"` +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // The string used to separate records. 
+ RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // REQUIRED; The date-time the key expires in ISO 8601 UTC time + Expiry *string `xml:"Expiry"` + + // REQUIRED; The date-time the key is active in ISO 8601 UTC time + Start *string `xml:"Start"` +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + // REQUIRED + FileSystemName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobFlatListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListPathsHierarchySegmentResponse - An enumeration of blobs +type ListPathsHierarchySegmentResponse struct { + // REQUIRED + FileSystemName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *PathHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListFileSystemsSegmentResponse - An enumeration of containers +type ListFileSystemsSegmentResponse struct { + // REQUIRED + FileSystemItems []*FileSystemItem `xml:"Containers>Container"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// Logging - Azure Analytics Logging settings. 
+type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged. + Delete *bool `xml:"Delete"` + + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` + + // REQUIRED; the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Blob service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. 
+ IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +type PageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. 
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the page blob. + Tier *PremiumPageBlobAccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +type PageBlobClientGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. 
+ Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. +type PageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. 
For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +type PageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +type PageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. 
+ TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// PageList - the list of pages +type PageList struct { + ClearRange []*ClearRange `xml:"ClearRange"` + NextMarker *string `xml:"NextMarker"` + PageRange []*PageRange `xml:"PageRange"` +} + +type PageRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +type QueryFormat struct { + // REQUIRED; The quick query format type. + Type *QueryFormatType `xml:"Type"` + + // Groups the settings used for formatting the response if the response should be Arrow formatted. + ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + + // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. + DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + + // json text configuration + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` + + // parquet configuration + ParquetTextConfiguration any `xml:"ParquetTextConfiguration"` +} + +// QueryRequest - Groups the set of query request settings. +type QueryRequest struct { + // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. + Expression *string `xml:"Expression"` + + // CONSTANT; Required. The type of the provided query expression. + // Field has constant value "SQL", any specified value is ignored. 
+ QueryType *string `xml:"QueryType"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +type QuerySerialization struct { + // REQUIRED + Format *QueryFormat `xml:"Format"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the storage service + Enabled *bool `xml:"Enabled"` + + // Indicates whether permanent delete is allowed on this storage account. + AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` + + // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this + // value will be deleted + Days *int32 `xml:"Days"` +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. +type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. 
The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. 
The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // REQUIRED; An Access policy + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` + + // REQUIRED; a unique id + ID *string `xml:"Id"` +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. 
+ SourceIfUnmodifiedSince *time.Time +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // REQUIRED; Indicates whether this account is hosting a static website + Enabled *bool `xml:"Enabled"` + + // Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` + + // The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + + // The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` +} + +type StorageError struct { + Message *string +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + // The set of CORS rules. + CORS []*CORSRule `xml:"Cors>CorsRule"` + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics `xml:"HourMetrics"` + + // Azure Analytics Logging settings. + Logging *Logging `xml:"Logging"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// StorageServiceStats - Stats for the storage service. 
+type StorageServiceStats struct { + // Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + // REQUIRED; The date-time the key expires + SignedExpiry *time.Time `xml:"SignedExpiry"` + + // REQUIRED; The Azure Active Directory object ID in GUID format. + SignedOID *string `xml:"SignedOid"` + + // REQUIRED; Abbreviation of the Azure Storage service that accepts the key + SignedService *string `xml:"SignedService"` + + // REQUIRED; The date-time the key is active + SignedStart *time.Time `xml:"SignedStart"` + + // REQUIRED; The Azure Active Directory tenant ID in GUID format + SignedTID *string `xml:"SignedTid"` + + // REQUIRED; The service version that created the key + SignedVersion *string `xml:"SignedVersion"` + + // REQUIRED; The key as a base64 string + Value *string `xml:"Value"` +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_models_serde.go b/sdk/storage/azdatalake/internal/generated_blob/zz_models_serde.go new file mode 100644 index 000000000000..bc5a3d6ec6ad --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/zz_models_serde.go @@ -0,0 +1,473 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated_blob + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. 
+func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. +func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. +func (a ArrowConfiguration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ArrowConfiguration + aux := &struct { + *alias + Schema *[]*ArrowField `xml:"Schema>Field"` + }{ + alias: (*alias)(&a), + } + if a.Schema != nil { + aux.Schema = &a.Schema + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. +func (b BlobFlatListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlobFlatListSegment + aux := &struct { + *alias + PathItems *[]*PathItem `xml:"Blob"` + }{ + alias: (*alias)(&b), + } + if b.PathItems != nil { + aux.PathItems = &b.PathItems + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PathHierarchyListSegment. 
+func (b PathHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias PathHierarchyListSegment + aux := &struct { + *alias + PathItems *[]*PathItem `xml:"Blob"` + PathPrefixes *[]*PathPrefix `xml:"PathPrefix"` + }{ + alias: (*alias)(&b), + } + if b.PathItems != nil { + aux.PathItems = &b.PathItems + } + if b.PathPrefixes != nil { + aux.PathPrefixes = &b.PathPrefixes + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PathProperties. +func (b PathProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias PathProperties + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&b), + AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), + CreationTime: (*timeRFC1123)(b.CreationTime), + DeletedTime: (*timeRFC1123)(b.DeletedTime), + ExpiresOn: (*timeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), + LastModified: (*timeRFC1123)(b.LastModified), + } + if b.ContentMD5 != nil { + encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) + aux.ContentMD5 = &encodedContentMD5 + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type PathProperties. 
+func (b *PathProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias PathProperties + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + if aux.ContentMD5 != nil { + if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { + return err + } + } + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + b.CreationTime = (*time.Time)(aux.CreationTime) + b.DeletedTime = (*time.Time)(aux.DeletedTime) + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + b.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type BlobTags. +func (b BlobTags) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "Tags" + type alias BlobTags + aux := &struct { + *alias + BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` + }{ + alias: (*alias)(&b), + } + if b.BlobTagSet != nil { + aux.BlobTagSet = &b.BlobTagSet + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockList. 
+func (b BlockList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlockList + aux := &struct { + *alias + CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` + }{ + alias: (*alias)(&b), + } + if b.CommittedBlocks != nil { + aux.CommittedBlocks = &b.CommittedBlocks + } + if b.UncommittedBlocks != nil { + aux.UncommittedBlocks = &b.UncommittedBlocks + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. +func (b BlockLookupList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "BlockList" + type alias BlockLookupList + aux := &struct { + *alias + Committed *[]*string `xml:"Committed"` + Latest *[]*string `xml:"Latest"` + Uncommitted *[]*string `xml:"Uncommitted"` + }{ + alias: (*alias)(&b), + } + if b.Committed != nil { + aux.Committed = &b.Committed + } + if b.Latest != nil { + aux.Latest = &b.Latest + } + if b.Uncommitted != nil { + aux.Uncommitted = &b.Uncommitted + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type FileSystemItem. +func (c *FileSystemItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias FileSystemItem + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(c), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + c.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FileSystemProperties. 
+func (c FileSystemProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias FileSystemProperties + aux := &struct { + *alias + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&c), + DeletedTime: (*timeRFC1123)(c.DeletedTime), + LastModified: (*timeRFC1123)(c.LastModified), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type FileSystemProperties. +func (c *FileSystemProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias FileSystemProperties + aux := &struct { + *alias + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(c), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + c.DeletedTime = (*time.Time)(aux.DeletedTime) + c.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. +func (f FilterBlobSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias FilterBlobSegment + aux := &struct { + *alias + Blobs *[]*FilterPathItem `xml:"Blobs>Blob"` + }{ + alias: (*alias)(&f), + } + if f.Blobs != nil { + aux.Blobs = &f.Blobs + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type GeoReplication. +func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(&g), + LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. 
+func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(g), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ListFileSystemsSegmentResponse. +func (l ListFileSystemsSegmentResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ListFileSystemsSegmentResponse + aux := &struct { + *alias + FileSystemItems *[]*FileSystemItem `xml:"Containers>Container"` + }{ + alias: (*alias)(&l), + } + if l.FileSystemItems != nil { + aux.FileSystemItems = &l.FileSystemItems + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PageList. +func (p PageList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias PageList + aux := &struct { + *alias + ClearRange *[]*ClearRange `xml:"ClearRange"` + PageRange *[]*PageRange `xml:"PageRange"` + }{ + alias: (*alias)(&p), + } + if p.ClearRange != nil { + aux.ClearRange = &p.ClearRange + } + if p.PageRange != nil { + aux.PageRange = &p.PageRange + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type QueryRequest. +func (q QueryRequest) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "QueryRequest" + type alias QueryRequest + aux := &struct { + *alias + }{ + alias: (*alias)(&q), + } + return enc.EncodeElement(aux, start) +} + +// MarshalJSON implements the json.Marshaller interface for type StorageError. 
+func (s StorageError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Message", s.Message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. +func (s *StorageError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Message": + err = unpopulate(val, "Message", &s.Message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. +func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + CORS *[]*CORSRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.CORS != nil { + aux.CORS = &s.CORS + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. +func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` + SignedStart *timeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(&u), + SignedExpiry: (*timeRFC3339)(u.SignedExpiry), + SignedStart: (*timeRFC3339)(u.SignedStart), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. 
+func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` + SignedStart *timeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(u), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + u.SignedExpiry = (*time.Time)(aux.SignedExpiry) + u.SignedStart = (*time.Time)(aux.SignedStart) + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateAny(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_pageblob_client.go b/sdk/storage/azdatalake/internal/generated_blob/zz_pageblob_client.go new file mode 100644 index 000000000000..b4801102a15d --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/zz_pageblob_client.go @@ -0,0 +1,1289 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated_blob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// PageBlobClient contains the methods for the PageBlob group. +// Don't use this type directly, use a constructor function instead. +type PageBlobClient struct { + internal *azcore.Client + endpoint string +} + +// ClearPages - The Clear Pages operation clears a set of pages from a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - contentLength - The length of the request. +// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + } + return client.clearPagesHandleResponse(resp) +} + +// clearPagesCreateRequest creates the ClearPages request. +func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"clear"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && 
leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// clearPagesHandleResponse handles the ClearPages response. +func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { + result := PageBlobClientClearPagesResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = 
&blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied +// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can +// be read or copied from as usual. This API is supported since REST version +// 2016-05-31. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + } + return client.copyIncrementalHandleResponse(resp) +} + +// copyIncrementalCreateRequest creates the CopyIncremental request. +func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "incrementalcopy") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + 
if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// copyIncrementalHandleResponse handles the CopyIncremental response. +func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { + result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + return result, nil +} + +// Create - The Create operation creates a new 
page blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - contentLength - The length of the request. +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// - options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
// createCreateRequest builds the Put Blob (PageBlob) request: it sets optional query
// parameters, then maps each optional parameter group onto its corresponding
// x-ms-*/standard HTTP header. Only non-nil options are emitted.
func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Optional query parameters.
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Required headers: blob type discriminator and request body length.
	req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"}
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	if options != nil && options.Tier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
	}
	// Standard blob HTTP headers (content type / encoding / language / MD5 / cache-control).
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	// MD5 is transmitted base64-encoded, per HTTP convention for binary header values.
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	// User metadata: each entry becomes an x-ms-meta-<key> header; nil values are skipped.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	// Customer-provided key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	// Conditional request headers; times are rendered as RFC1123 in GMT (HTTP date format).
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Maximum size of the page blob being created (distinct from Content-Length above).
	req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
	if options != nil && options.BlobSequenceNumber != nil {
		req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
	}
	// Service version this client was generated against.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	// Immutability policy / legal hold options.
	if options != nil && options.ImmutabilityPolicyExpiry != nil {
		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
	}
	if options != nil && options.ImmutabilityPolicyMode != nil {
		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
	}
	if options != nil && options.LegalHold != nil {
		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// createHandleResponse handles the Create response.
// createHandleResponse translates the Create response headers into a typed result.
// Absent headers leave the corresponding result field nil; malformed date/MD5/bool
// values abort with an error rather than being silently dropped.
func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) {
	result := PageBlobClientCreateResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientCreateResponse{}, err
		}
		result.LastModified = &lastModified
	}
	// Content-MD5 arrives base64-encoded; decode back to raw bytes.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return PageBlobClientCreateResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientCreateResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return PageBlobClientCreateResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}

// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot
// of a page blob
//
// Generated from API version 2020-10-02
// - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] {
	return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{
		// Paging continues while the service returns a non-empty NextMarker.
		More: func(page PageBlobClientGetPageRangesResponse) bool {
			return page.NextMarker != nil && len(*page.NextMarker) > 0
		},
		Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) {
			var req *policy.Request
			var err error
			if page == nil {
				// First page: build the full request from the supplied options.
				req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
			} else {
				// Subsequent pages: NextMarker is a complete URL for the next page.
				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker)
			}
			if err != nil {
				return PageBlobClientGetPageRangesResponse{}, err
			}
			resp, err := client.internal.Pipeline().Do(req)
			if err != nil {
				return PageBlobClientGetPageRangesResponse{}, err
			}
			if !runtime.HasStatusCode(resp, http.StatusOK) {
				return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp)
			}
			return client.GetPageRangesHandleResponse(resp)
		},
	})
}

// GetPageRangesCreateRequest creates the GetPageRanges request.
// GetPageRangesCreateRequest builds the Get Page Ranges request (comp=pagelist),
// mapping optional parameters onto query parameters and conditional headers.
func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "pagelist")
	if options != nil && options.Snapshot != nil {
		reqQP.Set("snapshot", *options.Snapshot)
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	// Paging parameters: continuation marker and page-size cap.
	if options != nil && options.Marker != nil {
		reqQP.Set("marker", *options.Marker)
	}
	if options != nil && options.Maxresults != nil {
		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.Range != nil {
		req.Raw().Header["x-ms-range"] = []string{*options.Range}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Conditional request headers; times are rendered as RFC1123 in GMT (HTTP date format).
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// GetPageRangesHandleResponse handles the GetPageRanges response.
// The response carries both headers (translated field-by-field below) and an XML
// body that is unmarshaled into result.PageList.
func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) {
	result := PageBlobClientGetPageRangesResponse{}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientGetPageRangesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
		blobContentLength, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return PageBlobClientGetPageRangesResponse{}, err
		}
		result.BlobContentLength = &blobContentLength
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientGetPageRangesResponse{}, err
		}
		result.Date = &date
	}
	// The XML body holds the actual page-range list.
	if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil {
		return PageBlobClientGetPageRangesResponse{}, err
	}
	return result, nil
}

// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that
// were changed between target blob and previous snapshot.
//
// Generated from API version 2020-10-02
// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] {
	return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{
		// Paging continues while the service returns a non-empty NextMarker.
		More: func(page PageBlobClientGetPageRangesDiffResponse) bool {
			return page.NextMarker != nil && len(*page.NextMarker) > 0
		},
		Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) {
			var req *policy.Request
			var err error
			if page == nil {
				// First page: build the full request from the supplied options.
				req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
			} else {
				// Subsequent pages: NextMarker is a complete URL for the next page.
				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker)
			}
			if err != nil {
				return PageBlobClientGetPageRangesDiffResponse{}, err
			}
			resp, err := client.internal.Pipeline().Do(req)
			if err != nil {
				return PageBlobClientGetPageRangesDiffResponse{}, err
			}
			if !runtime.HasStatusCode(resp, http.StatusOK) {
				return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp)
			}
			return client.GetPageRangesDiffHandleResponse(resp)
		},
	})
}

// GetPageRangesDiffCreateRequest creates the GetPageRangesDiff request.
// GetPageRangesDiffCreateRequest builds the Get Page Ranges Diff request
// (comp=pagelist plus prevsnapshot / x-ms-previous-snapshot-url), mapping optional
// parameters onto query parameters and conditional headers.
func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "pagelist")
	if options != nil && options.Snapshot != nil {
		reqQP.Set("snapshot", *options.Snapshot)
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	// The snapshot to diff against; the managed-disk variant uses the
	// x-ms-previous-snapshot-url header below instead.
	if options != nil && options.Prevsnapshot != nil {
		reqQP.Set("prevsnapshot", *options.Prevsnapshot)
	}
	// Paging parameters: continuation marker and page-size cap.
	if options != nil && options.Marker != nil {
		reqQP.Set("marker", *options.Marker)
	}
	if options != nil && options.Maxresults != nil {
		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.PrevSnapshotURL != nil {
		req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL}
	}
	if options != nil && options.Range != nil {
		req.Raw().Header["x-ms-range"] = []string{*options.Range}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Conditional request headers; times are rendered as RFC1123 in GMT (HTTP date format).
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response.
// Headers are translated field-by-field; the XML body is unmarshaled into
// result.PageList.
func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) {
	result := PageBlobClientGetPageRangesDiffResponse{}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientGetPageRangesDiffResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
		blobContentLength, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return PageBlobClientGetPageRangesDiffResponse{}, err
		}
		result.BlobContentLength = &blobContentLength
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientGetPageRangesDiffResponse{}, err
		}
		result.Date = &date
	}
	// The XML body holds the actual page-range diff list.
	if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil {
		return PageBlobClientGetPageRangesDiffResponse{}, err
	}
	return result, nil
}

// Resize - Resize the Blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
// to a 512-byte boundary.
// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) {
	// Build the request, send it through the client pipeline, then translate the response.
	req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	if err != nil {
		return PageBlobClientResizeResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return PageBlobClientResizeResponse{}, err
	}
	// Resize succeeds with 200 OK only.
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp)
	}
	return client.resizeHandleResponse(resp)
}

// resizeCreateRequest creates the Resize request.
// resizeCreateRequest builds the Resize request (Set Blob Properties, comp=properties,
// with x-ms-blob-content-length carrying the new maximum size).
func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Customer-provided key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	// Conditional request headers; times are rendered as RFC1123 in GMT (HTTP date format).
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// The new maximum size of the page blob.
	req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// resizeHandleResponse handles the Resize response, translating response headers
// into a typed result; absent headers leave the corresponding field nil.
func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) {
	result := PageBlobClientResizeResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientResizeResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return PageBlobClientResizeResponse{}, err
		}
		result.BlobSequenceNumber = &blobSequenceNumber
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientResizeResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// UpdateSequenceNumber - Update the sequence number of the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to
// page blobs only. This property indicates how the service should modify the blob's sequence number
// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) {
	// Build the request, send it through the client pipeline, then translate the response.
	req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions)
	if err != nil {
		return PageBlobClientUpdateSequenceNumberResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return PageBlobClientUpdateSequenceNumberResponse{}, err
	}
	// UpdateSequenceNumber succeeds with 200 OK only.
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp)
	}
	return client.updateSequenceNumberHandleResponse(resp)
}

// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request.
// updateSequenceNumberCreateRequest builds the Update Sequence Number request
// (Set Blob Properties, comp=properties, with x-ms-sequence-number-action).
func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Conditional request headers; times are rendered as RFC1123 in GMT (HTTP date format).
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// The action (max/update/increment — see SequenceNumberActionType) is required;
	// the explicit sequence number is optional and action-dependent.
	req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)}
	if options != nil && options.BlobSequenceNumber != nil {
		req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response,
// translating response headers into a typed result; absent headers leave the
// corresponding field nil.
func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) {
	result := PageBlobClientUpdateSequenceNumberResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientUpdateSequenceNumberResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return PageBlobClientUpdateSequenceNumberResponse{}, err
		}
		result.BlobSequenceNumber = &blobSequenceNumber
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return PageBlobClientUpdateSequenceNumberResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// UploadPages - The Upload Pages operation writes a range of pages to a page blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - contentLength - The length of the request.
// - body - Initial data
// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages
// method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) {
	// Build the request, send it through the client pipeline, then translate the response.
	req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
	if err != nil {
		return PageBlobClientUploadPagesResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return PageBlobClientUploadPagesResponse{}, err
	}
	// UploadPages succeeds with 201 Created only.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp)
	}
	return client.uploadPagesHandleResponse(resp)
}

// uploadPagesCreateRequest creates the UploadPages request.
+func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil 
&& cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// uploadPagesHandleResponse handles the UploadPages response. +func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { + result := PageBlobClientUploadPagesResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.Date = &date + } + if val := 
resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from +// a URL +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - sourceURL - Specify a URL to the copy source. +// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. +// - contentLength - The length of the request. +// - rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. +// - options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesFromURLHandleResponse(resp) +} + +// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
+func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if 
cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + 
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
+func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { + result := PageBlobClientUploadPagesFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val 
:= resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_response_types.go b/sdk/storage/azdatalake/internal/generated_blob/zz_response_types.go new file mode 100644 index 000000000000..44e0bb9131df --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/zz_response_types.go @@ -0,0 +1,1972 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated_blob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" + "time" +) + +// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. +type AppendBlobClientAppendBlockFromURLResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. +type AppendBlobClientAppendBlockResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. 
+ EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. +type AppendBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. 
+ VersionID *string +} + +// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. +type AppendBlobClientSealResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type BlobClientAbortCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type BlobClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BlobClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type BlobClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type BlobClientCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type BlobClientCreateSnapshotResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. + Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. +type BlobClientDeleteImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDeleteResponse contains the response from method BlobClient.Delete. +type BlobClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDownloadResponse contains the response from method BlobClient.Download. +type BlobClientDownloadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ErrorCode contains the information returned from the x-ms-error-code header response. + ErrorCode *string + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. 
+ ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. 
+ VersionID *string +} + +// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type BlobClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. +type BlobClientGetPropertiesResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. + AccessTierInferred *bool + + // ArchiveStatus contains the information returned from the x-ms-archive-status header response. + ArchiveStatus *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. 
+ BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. 
+ CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. + DestinationSnapshot *string + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ExpiresOn contains the information returned from the x-ms-expiry-time header response. + ExpiresOn *time.Time + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. + IsIncrementalCopy *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. 
+ LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. + RehydratePriority *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. +type BlobClientGetTagsResponse struct { + BlobTags + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// BlobClientQueryResponse contains the response from method BlobClient.Query. +type BlobClientQueryResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. 
+ LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. +type BlobClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. 
+ LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. +type BlobClientSetExpiryResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobClientSetHTTPHeadersResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type BlobClientSetImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiry *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. +type BlobClientSetLegalHoldResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. +type BlobClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. +type BlobClientSetTagsResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. +type BlobClientSetTierResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobClientStartCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. +type BlobClientUndeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. +type BlockBlobClientCommitBlockListResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobClientGetBlockListResponse struct { + BlockList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. 
+ BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. +type BlockBlobClientPutBlobFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. 
+ IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobClientStageBlockFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. 
+type BlockBlobClientStageBlockResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobClientUploadResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. 
+ EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. +type ContainerClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. +type ContainerClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. +type ContainerClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientCreateResponse contains the response from method ContainerClient.Create. +type ContainerClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. +type ContainerClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. +type ContainerClientGetAccessPolicyResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string `xml:"RequestID"` + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. +type ContainerClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. +type ContainerClientGetPropertiesResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. 
+ DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. + HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. + IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. +type ContainerClientListBlobFlatSegmentResponse struct { + ListBlobsFlatSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. 
+	ContentType *string `xml:"ContentType"`
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time `xml:"Date"`
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string `xml:"RequestID"`
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string `xml:"Version"`
+}
+
+// FileSystemClientListPathHierarchySegmentResponse contains the response from method FileSystemClient.NewListPathHierarchySegmentPager.
+// NOTE(review): this comment previously referenced ContainerClient.NewListBlobHierarchySegmentPager, which does not match
+// the renamed datalake type; updated to the file-system/path terminology — confirm the exact pager name against the client.
+type FileSystemClientListPathHierarchySegmentResponse struct {
+	ListPathsHierarchySegmentResponse
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string `xml:"ClientRequestID"`
+
+	// ContentType contains the information returned from the Content-Type header response.
+	ContentType *string `xml:"ContentType"`
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time `xml:"Date"`
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string `xml:"RequestID"`
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string `xml:"Version"`
+}
+
+// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease.
+type ContainerClientReleaseLeaseResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. +type ContainerClientRenameResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. +type ContainerClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. +type ContainerClientRestoreResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. +type ContainerClientSetAccessPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. +type ContainerClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. 
+type ContainerClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. +type PageBlobClientClearPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. +type PageBlobClientCopyIncrementalResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. +type PageBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. +type PageBlobClientGetPageRangesDiffResponse struct { + PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. +type PageBlobClientGetPageRangesResponse struct { + PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. 
+ Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. +type PageBlobClientResizeResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. +type PageBlobClientUpdateSequenceNumberResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. +type PageBlobClientUploadPagesFromURLResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. +type PageBlobClientUploadPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. 
+type ServiceClientFilterBlobsResponse struct { + FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. +type ServiceClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. +type ServiceClientGetPropertiesResponse struct { + StorageServiceProperties + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+	ClientRequestID *string `xml:"ClientRequestID"`
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string `xml:"RequestID"`
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string `xml:"Version"`
+}
+
+// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics.
+type ServiceClientGetStatisticsResponse struct {
+	StorageServiceStats
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string `xml:"ClientRequestID"`
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time `xml:"Date"`
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string `xml:"RequestID"`
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string `xml:"Version"`
+}
+
+// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey.
+type ServiceClientGetUserDelegationKeyResponse struct {
+	UserDelegationKey
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string `xml:"ClientRequestID"`
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time `xml:"Date"`
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string `xml:"RequestID"`
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string `xml:"Version"`
+}
+
+// ServiceClientListFileSystemsSegmentResponse contains the response from method ServiceClient.NewListFileSystemsSegmentPager.
+// NOTE(review): previously said NewListContainersSegmentPager; renamed to match the file-system terminology of this type
+// and its embedded ListFileSystemsSegmentResponse — confirm the exact pager name against the generated service client.
+type ServiceClientListFileSystemsSegmentResponse struct { + ListFileSystemsSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch. +type ServiceClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_service_client.go b/sdk/storage/azdatalake/internal/generated_blob/zz_service_client.go new file mode 100644 index 000000000000..e335a7e4c689 --- /dev/null +++ b/sdk/storage/azdatalake/internal/generated_blob/zz_service_client.go @@ -0,0 +1,557 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated_blob

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// ServiceClient contains the methods for the Service group.
// Don't use this type directly, use a constructor function instead.
type ServiceClient struct {
	internal *azcore.Client
	endpoint string
}

// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search
// expression. Filter blobs searches across all containers within a storage account but can
// be scoped within the expression to a single container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - where - Filters the results to return only blobs whose tags match the specified expression.
// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) {
	req, err := client.filterBlobsCreateRequest(ctx, where, options)
	if err != nil {
		return ServiceClientFilterBlobsResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ServiceClientFilterBlobsResponse{}, err
	}
	// Only 200 OK is a success for this operation.
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp)
	}
	return client.filterBlobsHandleResponse(resp)
}

// filterBlobsCreateRequest creates the FilterBlobs request.
func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "blobs")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	reqQP.Set("where", where)
	if options != nil && options.Marker != nil {
		reqQP.Set("marker", *options.Marker)
	}
	if options != nil && options.Maxresults != nil {
		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
	}
	// url.Values.Encode renders spaces as '+'; re-encode them as %20 for the service.
	req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// filterBlobsHandleResponse handles the FilterBlobs response.
func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceClientFilterBlobsResponse, error) {
	result := ServiceClientFilterBlobsResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ServiceClientFilterBlobsResponse{}, err
		}
		result.Date = &date
	}
	if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil {
		return ServiceClientFilterBlobsResponse{}, err
	}
	return result, nil
}

// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) {
	req, err := client.getAccountInfoCreateRequest(ctx, options)
	if err != nil {
		return ServiceClientGetAccountInfoResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ServiceClientGetAccountInfoResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
	}
	return client.getAccountInfoHandleResponse(resp)
}

// getAccountInfoCreateRequest creates the GetAccountInfo request.
func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "account")
	reqQP.Set("comp", "properties")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// getAccountInfoHandleResponse handles the GetAccountInfo response.
func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) {
	result := ServiceClientGetAccountInfoResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ServiceClientGetAccountInfoResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-sku-name"); val != "" {
		result.SKUName = (*SKUName)(&val)
	}
	if val := resp.Header.Get("x-ms-account-kind"); val != "" {
		result.AccountKind = (*AccountKind)(&val)
	}
	if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" {
		isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val)
		if err != nil {
			return ServiceClientGetAccountInfoResponse{}, err
		}
		result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled
	}
	return result, nil
}

// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and
// CORS (Cross-Origin Resource Sharing) rules.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) {
	req, err := client.getPropertiesCreateRequest(ctx, options)
	if err != nil {
		return ServiceClientGetPropertiesResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ServiceClientGetPropertiesResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
	}
	return client.getPropertiesHandleResponse(resp)
}

// getPropertiesCreateRequest creates the GetProperties request.
func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "service")
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// getPropertiesHandleResponse handles the GetProperties response.
func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) {
	result := ServiceClientGetPropertiesResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil {
		return ServiceClientGetPropertiesResponse{}, err
	}
	return result, nil
}

// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary
// location endpoint when read-access geo-redundant replication is enabled for the storage account.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) {
	req, err := client.getStatisticsCreateRequest(ctx, options)
	if err != nil {
		return ServiceClientGetStatisticsResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ServiceClientGetStatisticsResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp)
	}
	return client.getStatisticsHandleResponse(resp)
}

// getStatisticsCreateRequest creates the GetStatistics request.
func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "service")
	reqQP.Set("comp", "stats")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// getStatisticsHandleResponse handles the GetStatistics response.
func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) {
	result := ServiceClientGetStatisticsResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ServiceClientGetStatisticsResponse{}, err
		}
		result.Date = &date
	}
	if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil {
		return ServiceClientGetStatisticsResponse{}, err
	}
	return result, nil
}

// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using
// bearer token authentication.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - keyInfo - Key information
// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey
//   method.
func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) {
	req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options)
	if err != nil {
		return ServiceClientGetUserDelegationKeyResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ServiceClientGetUserDelegationKeyResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp)
	}
	return client.getUserDelegationKeyHandleResponse(resp)
}

// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request.
func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "service")
	reqQP.Set("comp", "userdelegationkey")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// The key scope (start/expiry) is sent as the XML request body.
	if err := runtime.MarshalAsXML(req, keyInfo); err != nil {
		return nil, err
	}
	return req, nil
}

// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response.
func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceClientGetUserDelegationKeyResponse, error) {
	result := ServiceClientGetUserDelegationKeyResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ServiceClientGetUserDelegationKeyResponse{}, err
		}
		result.Date = &date
	}
	if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil {
		return ServiceClientGetUserDelegationKeyResponse{}, err
	}
	return result, nil
}

// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified
// account
//
// Generated from API version 2020-10-02
// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager
//   method.
//
// ListContainersSegmentCreateRequest creates the ListContainersSegment request.
func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "list")
	if options != nil && options.Prefix != nil {
		reqQP.Set("prefix", *options.Prefix)
	}
	if options != nil && options.Marker != nil {
		reqQP.Set("marker", *options.Marker)
	}
	if options != nil && options.Maxresults != nil {
		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
	}
	if options != nil && options.Include != nil {
		// Render the slice of include values as a comma-separated list.
		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// ListContainersSegmentHandleResponse handles the ListContainersSegment response.
func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Response) (ServiceClientListFileSystemsSegmentResponse, error) {
	result := ServiceClientListFileSystemsSegmentResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	// The blob ListContainers XML payload is decoded into the datalake file-system segment model.
	if err := runtime.UnmarshalAsXML(resp, &result.ListFileSystemsSegmentResponse); err != nil {
		return ServiceClientListFileSystemsSegmentResponse{}, err
	}
	return result, nil
}

// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics
// and CORS (Cross-Origin Resource Sharing) rules
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - storageServiceProperties - The StorageService properties.
// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) {
	req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options)
	if err != nil {
		return ServiceClientSetPropertiesResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ServiceClientSetPropertiesResponse{}, err
	}
	// 202 Accepted is the success status for this operation.
	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
		return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp)
	}
	return client.setPropertiesHandleResponse(resp)
}

// setPropertiesCreateRequest creates the SetProperties request.
func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "service")
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil {
		return nil, err
	}
	return req, nil
}

// setPropertiesHandleResponse handles the SetProperties response.
func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) {
	result := ServiceClientSetPropertiesResponse{}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	return result, nil
}

// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - contentLength - The length of the request.
// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
//   value: multipart/mixed; boundary=batch_
// - body - Initial data
// - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method.
func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) {
	req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options)
	if err != nil {
		return ServiceClientSubmitBatchResponse{}, err
	}
	resp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ServiceClientSubmitBatchResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
		return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
	}
	return client.submitBatchHandleResponse(resp)
}

// submitBatchCreateRequest creates the SubmitBatch request.
func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "batch")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// The multipart response body is streamed back to the caller, so the pipeline must not drain it.
	runtime.SkipBodyDownload(req)
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	req.Raw().Header["Content-Type"] = []string{multipartContentType}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	if err := req.SetBody(body, multipartContentType); err != nil {
		return nil, err
	}
	return req, nil
}

// submitBatchHandleResponse handles the SubmitBatch response.
func (client *ServiceClient) submitBatchHandleResponse(resp *http.Response) (ServiceClientSubmitBatchResponse, error) {
	// Body is handed to the caller unread; the caller is responsible for closing it.
	result := ServiceClientSubmitBatchResponse{Body: resp.Body}
	if val := resp.Header.Get("Content-Type"); val != "" {
		result.ContentType = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	return result, nil
}
diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_time_rfc1123.go b/sdk/storage/azdatalake/internal/generated_blob/zz_time_rfc1123.go
new file mode 100644
index 000000000000..def276c50d08
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated_blob/zz_time_rfc1123.go
@@ -0,0 +1,43 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated_blob

import (
	"strings"
	"time"
)

const (
	// rfc1123JSON is the RFC1123 layout wrapped in JSON string quotes.
	rfc1123JSON = `"` + time.RFC1123 + `"`
)

// timeRFC1123 wraps time.Time to (un)marshal using the RFC1123 wire format.
type timeRFC1123 time.Time

func (t timeRFC1123) MarshalJSON() ([]byte, error) {
	b := []byte(time.Time(t).Format(rfc1123JSON))
	return b, nil
}

func (t timeRFC1123) MarshalText() ([]byte, error) {
	b := []byte(time.Time(t).Format(time.RFC1123))
	return b, nil
}

func (t *timeRFC1123) UnmarshalJSON(data []byte) error {
	// time.Parse matches month/day names case-insensitively, so upper-casing is safe here.
	p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data)))
	*t = timeRFC1123(p)
	return err
}

func (t *timeRFC1123) UnmarshalText(data []byte) error {
	p, err := time.Parse(time.RFC1123, string(data))
	*t = timeRFC1123(p)
	return err
}
diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_time_rfc3339.go b/sdk/storage/azdatalake/internal/generated_blob/zz_time_rfc3339.go
new file mode 100644
index 000000000000..034d115920a9
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated_blob/zz_time_rfc3339.go
@@ -0,0 +1,59 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated_blob

import (
	"regexp"
	"strings"
	"time"
)

const (
	utcLayoutJSON = `"2006-01-02T15:04:05.999999999"`
	utcLayout     = "2006-01-02T15:04:05.999999999"
	rfc3339JSON   = `"` + time.RFC3339Nano + `"`
)

// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
// tzOffsetRegex detects a trailing zone designator ('Z' or a numeric offset); its presence
// selects the RFC3339 layout, otherwise the zone-less UTC layout is used.
var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`)

// timeRFC3339 wraps time.Time to tolerate Azure's zone-suffix-optional RFC3339 format.
type timeRFC3339 time.Time

func (t timeRFC3339) MarshalJSON() (json []byte, err error) {
	tt := time.Time(t)
	return tt.MarshalJSON()
}

func (t timeRFC3339) MarshalText() (text []byte, err error) {
	tt := time.Time(t)
	return tt.MarshalText()
}

func (t *timeRFC3339) UnmarshalJSON(data []byte) error {
	layout := utcLayoutJSON
	if tzOffsetRegex.Match(data) {
		layout = rfc3339JSON
	}
	return t.Parse(layout, string(data))
}

func (t *timeRFC3339) UnmarshalText(data []byte) (err error) {
	layout := utcLayout
	if tzOffsetRegex.Match(data) {
		layout = time.RFC3339Nano
	}
	return t.Parse(layout, string(data))
}

func (t *timeRFC3339) Parse(layout, value string) error {
	p, err := time.Parse(layout, strings.ToUpper(value))
	*t = timeRFC3339(p)
	return err
}
diff --git a/sdk/storage/azdatalake/internal/generated_blob/zz_xml_helper.go b/sdk/storage/azdatalake/internal/generated_blob/zz_xml_helper.go
new file mode 100644
index 000000000000..be1f2b2d3ffd
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/generated_blob/zz_xml_helper.go
@@ -0,0 +1,41 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated_blob

import (
	"encoding/xml"
	"strings"
)

// additionalProperties decodes arbitrary child elements into a map of
// lower-cased element name -> text content.
type additionalProperties map[string]*string

// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties.
func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	tokName := ""
	// Iteration stops at the first decoder error (normally io.EOF at the end of
	// the element); the error itself is intentionally not surfaced.
	for t, err := d.Token(); err == nil; t, err = d.Token() {
		switch tt := t.(type) {
		case xml.StartElement:
			tokName = strings.ToLower(tt.Name.Local)
			break
		case xml.CharData:
			// Character data outside an element (e.g. whitespace between tags) is skipped.
			if tokName == "" {
				continue
			}
			if *ap == nil {
				*ap = additionalProperties{}
			}
			s := string(tt)
			(*ap)[tokName] = &s
			tokName = ""
			break
		}
	}
	return nil
}
diff --git a/sdk/storage/azdatalake/internal/path/constants.go b/sdk/storage/azdatalake/internal/path/constants.go
new file mode 100644
index 000000000000..0744d15e9205
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/path/constants.go
@@ -0,0 +1,75 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

package path

import (
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated"
)

// EncryptionAlgorithmType defines values for EncryptionAlgorithmType.
type EncryptionAlgorithmType = generated.EncryptionAlgorithmType

const (
	EncryptionAlgorithmTypeNone   EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone
	EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256
)

// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode.
type ImmutabilityPolicyMode = blob.ImmutabilityPolicyMode

const (
	ImmutabilityPolicyModeMutable  ImmutabilityPolicyMode = blob.ImmutabilityPolicyModeMutable
	ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = blob.ImmutabilityPolicyModeUnlocked
	ImmutabilityPolicyModeLocked   ImmutabilityPolicyMode = blob.ImmutabilityPolicyModeLocked
)

// CopyStatusType defines values for CopyStatusType
type CopyStatusType = blob.CopyStatusType

const (
	CopyStatusTypePending CopyStatusType = blob.CopyStatusTypePending
	CopyStatusTypeSuccess CopyStatusType = blob.CopyStatusTypeSuccess
	CopyStatusTypeAborted CopyStatusType = blob.CopyStatusTypeAborted
	CopyStatusTypeFailed  CopyStatusType = blob.CopyStatusTypeFailed
)

// StatusType defines values for StatusType
type StatusType = azdatalake.StatusType

const (
	StatusTypeLocked   StatusType = azdatalake.StatusTypeLocked
	StatusTypeUnlocked StatusType = azdatalake.StatusTypeUnlocked
)

// PossibleStatusTypeValues returns the possible values for the StatusType const type.
func PossibleStatusTypeValues() []StatusType {
	return azdatalake.PossibleStatusTypeValues()
}

// DurationType defines values for DurationType
type DurationType = azdatalake.DurationType

const (
	DurationTypeInfinite DurationType = azdatalake.DurationTypeInfinite
	DurationTypeFixed    DurationType = azdatalake.DurationTypeFixed
)

// PossibleDurationTypeValues returns the possible values for the DurationType const type.
func PossibleDurationTypeValues() []DurationType {
	return azdatalake.PossibleDurationTypeValues()
}

// StateType defines values for StateType
type StateType = azdatalake.StateType

const (
	StateTypeAvailable StateType = azdatalake.StateTypeAvailable
	StateTypeLeased    StateType = azdatalake.StateTypeLeased
	StateTypeExpired   StateType = azdatalake.StateTypeExpired
	StateTypeBreaking  StateType = azdatalake.StateTypeBreaking
	StateTypeBroken    StateType = azdatalake.StateTypeBroken
)
diff --git a/sdk/storage/azdatalake/internal/path/models.go b/sdk/storage/azdatalake/internal/path/models.go
new file mode 100644
index 000000000000..3a25acce5a27
--- /dev/null
+++ b/sdk/storage/azdatalake/internal/path/models.go
@@ -0,0 +1,307 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

package path

import (
	"errors"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated"
	"time"
)

// DeleteOptions contains the optional parameters when calling the Delete operation.
type DeleteOptions struct {
	// AccessConditions contains parameters for accessing the path.
	AccessConditions *AccessConditions
}

// FormatDeleteOptions converts DeleteOptions into the lease/modified access conditions and
// generated delete options expected by the generated path client. A nil input still yields
// delete options carrying the required recursive flag.
func FormatDeleteOptions(o *DeleteOptions, recursive bool) (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, *generated.PathClientDeleteOptions) {
	deleteOpts := &generated.PathClientDeleteOptions{
		Recursive: &recursive,
	}
	if o == nil {
		return nil, nil, deleteOpts
	}
	leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions)
	return leaseAccessConditions, modifiedAccessConditions, deleteOpts
}

// RenameOptions contains the optional parameters when calling the Rename operation.
type RenameOptions struct {
	// SourceAccessConditions identifies the source path access conditions.
	SourceAccessConditions *SourceAccessConditions
	// AccessConditions contains parameters for accessing the path.
	AccessConditions *AccessConditions
}

// FormatRenameOptions converts RenameOptions into the access conditions and create options
// expected by the generated path client. Rename is issued as a Create call with RenameSource
// pointing at the old path; the mode is set to PathRenameModeLegacy.
func FormatRenameOptions(o *RenameOptions, path string) (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions, *generated.PathClientCreateOptions) {
	mode := generated.PathRenameModeLegacy
	createOpts := &generated.PathClientCreateOptions{
		Mode:         &mode,
		RenameSource: &path,
	}
	if o == nil {
		return nil, nil, nil, createOpts
	}
	leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions)
	if o.SourceAccessConditions != nil {
		if o.SourceAccessConditions.SourceLeaseAccessConditions != nil {
			createOpts.SourceLeaseID = o.SourceAccessConditions.SourceLeaseAccessConditions.LeaseID
		}
		if o.SourceAccessConditions.SourceModifiedAccessConditions != nil {
			sourceModifiedAccessConditions := &generated.SourceModifiedAccessConditions{
				SourceIfMatch:           o.SourceAccessConditions.SourceModifiedAccessConditions.SourceIfMatch,
				SourceIfModifiedSince:   o.SourceAccessConditions.SourceModifiedAccessConditions.SourceIfModifiedSince,
				SourceIfNoneMatch:       o.SourceAccessConditions.SourceModifiedAccessConditions.SourceIfNoneMatch,
				SourceIfUnmodifiedSince: o.SourceAccessConditions.SourceModifiedAccessConditions.SourceIfUnmodifiedSince,
			}
			return leaseAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions, createOpts
		}
	}
	return leaseAccessConditions, modifiedAccessConditions, nil, createOpts
}

// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method.
+type GetPropertiesOptions struct { + AccessConditions *AccessConditions + CPKInfo *CPKInfo +} + +func FormatGetPropertiesOptions(o *GetPropertiesOptions) *blob.GetPropertiesOptions { + if o == nil { + return nil + } + accessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + if o.CPKInfo == nil { + o.CPKInfo = &CPKInfo{} + } + return &blob.GetPropertiesOptions{ + AccessConditions: accessConditions, + CPKInfo: &blob.CPKInfo{ + EncryptionKey: o.CPKInfo.EncryptionKey, + EncryptionAlgorithm: (*blob.EncryptionAlgorithmType)(o.CPKInfo.EncryptionAlgorithm), + EncryptionKeySHA256: o.CPKInfo.EncryptionKeySHA256, + }, + } +} + +// ===================================== PATH IMPORTS =========================================== + +// SetAccessControlOptions contains the optional parameters when calling the SetAccessControl operation. +type SetAccessControlOptions struct { + // Owner is the owner of the path. + Owner *string + // Group is the owning group of the path. + Group *string + // ACL is the access control list for the path. + ACL *string + // Permissions is the octal representation of the permissions for user, group and mask. + Permissions *string + // AccessConditions contains parameters for accessing the path. 
+ AccessConditions *AccessConditions +} + +func FormatSetAccessControlOptions(o *SetAccessControlOptions) (*generated.PathClientSetAccessControlOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, error) { + if o == nil { + return nil, nil, nil, datalakeerror.MissingParameters + } + // call path formatter since we're hitting dfs in this operation + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) + if o.Owner == nil && o.Group == nil && o.ACL == nil && o.Permissions == nil { + return nil, nil, nil, errors.New("at least one parameter should be set for SetAccessControl API") + } + return &generated.PathClientSetAccessControlOptions{ + Owner: o.Owner, + Group: o.Group, + ACL: o.ACL, + Permissions: o.Permissions, + }, leaseAccessConditions, modifiedAccessConditions, nil +} + +// GetAccessControlOptions contains the optional parameters when calling the GetAccessControl operation. +type GetAccessControlOptions struct { + // UPN is the user principal name. + UPN *bool + // AccessConditions contains parameters for accessing the path. + AccessConditions *AccessConditions +} + +func FormatGetAccessControlOptions(o *GetAccessControlOptions) (*generated.PathClientGetPropertiesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + action := generated.PathGetPropertiesActionGetAccessControl + if o == nil { + return &generated.PathClientGetPropertiesOptions{ + Action: &action, + }, nil, nil + } + // call path formatter since we're hitting dfs in this operation + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) + return &generated.PathClientGetPropertiesOptions{ + Upn: o.UPN, + Action: &action, + }, leaseAccessConditions, modifiedAccessConditions +} + +// CPKInfo contains CPK related information. +type CPKInfo struct { + // EncryptionAlgorithm is the algorithm used to encrypt the data. 
+ EncryptionAlgorithm *EncryptionAlgorithmType + // EncryptionKey is the base64 encoded encryption key. + EncryptionKey *string + // EncryptionKeySHA256 is the base64 encoded SHA256 of the encryption key. + EncryptionKeySHA256 *string +} + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + // StartTime is the start time for this SAS token. + StartTime *time.Time +} + +func FormatGetSASURLOptions(o *GetSASURLOptions) time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. +type SetHTTPHeadersOptions struct { + // AccessConditions contains parameters for accessing the path. + AccessConditions *AccessConditions +} + +func FormatSetHTTPHeadersOptions(o *SetHTTPHeadersOptions, httpHeaders HTTPHeaders) (*blob.SetHTTPHeadersOptions, blob.HTTPHeaders) { + httpHeaderOpts := blob.HTTPHeaders{ + BlobCacheControl: httpHeaders.CacheControl, + BlobContentDisposition: httpHeaders.ContentDisposition, + BlobContentEncoding: httpHeaders.ContentEncoding, + BlobContentLanguage: httpHeaders.ContentLanguage, + BlobContentMD5: httpHeaders.ContentMD5, + BlobContentType: httpHeaders.ContentType, + } + if o == nil { + return nil, httpHeaderOpts + } + accessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &blob.SetHTTPHeadersOptions{ + AccessConditions: accessConditions, + }, httpHeaderOpts +} + +// HTTPHeaders contains the HTTP headers for path operations. +type HTTPHeaders struct { + // CacheControl Sets the path's cache control. If specified, this property is stored with the path and returned with a read request. + CacheControl *string + // ContentDisposition Sets the path's Content-Disposition header. 
+ ContentDisposition *string + // ContentEncoding Sets the path's content encoding. If specified, this property is stored with the path and returned with a read + // request. + ContentEncoding *string + // ContentLanguage Set the path's content language. If specified, this property is stored with the path and returned with a read + // request. + ContentLanguage *string + // ContentMD5 Specify the transactional md5 for the body, to be validated by the service. + ContentMD5 []byte + // ContentType Sets the path's content type. If specified, this property is stored with the path and returned with a read request. + ContentType *string +} + +func FormatBlobHTTPHeaders(o *HTTPHeaders) *blob.HTTPHeaders { + + opts := &blob.HTTPHeaders{ + BlobCacheControl: o.CacheControl, + BlobContentDisposition: o.ContentDisposition, + BlobContentEncoding: o.ContentEncoding, + BlobContentLanguage: o.ContentLanguage, + BlobContentMD5: o.ContentMD5, + BlobContentType: o.ContentType, + } + return opts +} + +func FormatPathHTTPHeaders(o *HTTPHeaders) *generated.PathHTTPHeaders { + if o == nil { + return nil + } + opts := generated.PathHTTPHeaders{ + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + ContentMD5: o.ContentMD5, + ContentType: o.ContentType, + TransactionalContentHash: o.ContentMD5, + } + return &opts +} + +// SetMetadataOptions provides set of configurations for Set Metadata on path operation +type SetMetadataOptions struct { + // AccessConditions contains parameters for accessing the path. + AccessConditions *AccessConditions + // CPKInfo contains CPK related information. + CPKInfo *CPKInfo + // CPKScopeInfo specifies the encryption scope settings. 
+ CPKScopeInfo *CPKScopeInfo +} + +func FormatSetMetadataOptions(o *SetMetadataOptions) *blob.SetMetadataOptions { + if o == nil { + return nil + } + accessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + opts := &blob.SetMetadataOptions{ + AccessConditions: accessConditions, + } + if o.CPKInfo != nil { + opts.CPKInfo = &blob.CPKInfo{ + EncryptionKey: o.CPKInfo.EncryptionKey, + EncryptionAlgorithm: (*blob.EncryptionAlgorithmType)(o.CPKInfo.EncryptionAlgorithm), + EncryptionKeySHA256: o.CPKInfo.EncryptionKeySHA256, + } + } + if o.CPKScopeInfo != nil { + opts.CPKScopeInfo = o.CPKScopeInfo + } + return opts +} + +// ========================================= constants ========================================= + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// AccessConditions identifies access conditions which you optionally set. +type AccessConditions = exported.AccessConditions + +// SourceAccessConditions identifies source access conditions which you optionally set. +type SourceAccessConditions = exported.SourceAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// SourceModifiedAccessConditions contains a group of parameters for specifying access conditions. +type SourceModifiedAccessConditions = exported.SourceModifiedAccessConditions + +// CPKScopeInfo contains a group of parameters for the Client.SetMetadata() method. +type CPKScopeInfo = blob.CPKScopeInfo + +// ACLFailedEntry contains the failed ACL entry (response model). 
+type ACLFailedEntry = generated.ACLFailedEntry diff --git a/sdk/storage/azdatalake/internal/path/responses.go b/sdk/storage/azdatalake/internal/path/responses.go new file mode 100644 index 000000000000..1cf49ce01acf --- /dev/null +++ b/sdk/storage/azdatalake/internal/path/responses.go @@ -0,0 +1,325 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package path + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "net/http" + "time" +) + +// SetAccessControlResponse contains the response fields for the SetAccessControl operation. +type SetAccessControlResponse = generated.PathClientSetAccessControlResponse + +// GetAccessControlResponse contains the response fields for the GetAccessControl operation. +type GetAccessControlResponse = generated.PathClientGetPropertiesResponse + +// UpdateAccessControlResponse contains the response fields for the UpdateAccessControlRecursive operation. +type UpdateAccessControlResponse = generated.PathClientSetAccessControlRecursiveResponse + +// RemoveAccessControlResponse contains the response fields for the RemoveAccessControlRecursive operation. +type RemoveAccessControlResponse = generated.PathClientSetAccessControlRecursiveResponse + +// CreateResponse contains the response fields for the Create operation. +type CreateResponse = generated.PathClientCreateResponse + +// DeleteResponse contains the response fields for the Delete operation. +type DeleteResponse = generated.PathClientDeleteResponse + +type RenameResponse struct { + // ContentLength contains the information returned from the Content-Length header response. 
+ ContentLength *int64 + + // Continuation contains the information returned from the x-ms-continuation header response. + Continuation *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// We need to do this now in case we add the new client to renamed response - we don't want to break the cx + +func FormatRenameResponse(createResp *CreateResponse) RenameResponse { + newResp := RenameResponse{} + newResp.ContentLength = createResp.ContentLength + newResp.Continuation = createResp.Continuation + newResp.Date = createResp.Date + newResp.ETag = createResp.ETag + newResp.EncryptionKeySHA256 = createResp.EncryptionKeySHA256 + newResp.IsServerEncrypted = createResp.IsServerEncrypted + newResp.LastModified = createResp.LastModified + newResp.RequestID = createResp.RequestID + newResp.Version = createResp.Version + return newResp +} + +// removed BlobSequenceNumber, BlobCommittedBlockCount and BlobType headers from the original response: + +// GetPropertiesResponse contains the response fields for the GetProperties operation. +type GetPropertiesResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. 
+ AcceptRanges *string + + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. + AccessTierInferred *bool + + // ArchiveStatus contains the information returned from the x-ms-archive-status header response. + ArchiveStatus *string + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. 
+ CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. + DestinationSnapshot *string + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ExpiresOn contains the information returned from the x-ms-expiry-time header response. + ExpiresOn *time.Time + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. + IsIncrementalCopy *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. 
+ IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *DurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *StateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *StatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. + RehydratePriority *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // Owner contains the information returned from the x-ms-owner header response. + Owner *string + + // Group contains the information returned from the x-ms-group header response. 
+ Group *string + + // Permissions contains the information returned from the x-ms-permissions header response. + Permissions *string +} + +func FormatGetPropertiesResponse(r *blob.GetPropertiesResponse, rawResponse *http.Response) GetPropertiesResponse { + newResp := GetPropertiesResponse{} + newResp.AcceptRanges = r.AcceptRanges + newResp.AccessTier = r.AccessTier + newResp.AccessTierChangeTime = r.AccessTierChangeTime + newResp.AccessTierInferred = r.AccessTierInferred + newResp.ArchiveStatus = r.ArchiveStatus + newResp.CacheControl = r.CacheControl + newResp.ClientRequestID = r.ClientRequestID + newResp.ContentDisposition = r.ContentDisposition + newResp.ContentEncoding = r.ContentEncoding + newResp.ContentLanguage = r.ContentLanguage + newResp.ContentLength = r.ContentLength + newResp.ContentMD5 = r.ContentMD5 + newResp.ContentType = r.ContentType + newResp.CopyCompletionTime = r.CopyCompletionTime + newResp.CopyID = r.CopyID + newResp.CopyProgress = r.CopyProgress + newResp.CopySource = r.CopySource + newResp.CopyStatus = r.CopyStatus + newResp.CopyStatusDescription = r.CopyStatusDescription + newResp.CreationTime = r.CreationTime + newResp.Date = r.Date + newResp.DestinationSnapshot = r.DestinationSnapshot + newResp.ETag = r.ETag + newResp.EncryptionKeySHA256 = r.EncryptionKeySHA256 + newResp.EncryptionScope = r.EncryptionScope + newResp.ExpiresOn = r.ExpiresOn + newResp.ImmutabilityPolicyExpiresOn = r.ImmutabilityPolicyExpiresOn + newResp.ImmutabilityPolicyMode = r.ImmutabilityPolicyMode + newResp.IsCurrentVersion = r.IsCurrentVersion + newResp.IsIncrementalCopy = r.IsIncrementalCopy + newResp.IsSealed = r.IsSealed + newResp.IsServerEncrypted = r.IsServerEncrypted + newResp.LastAccessed = r.LastAccessed + newResp.LastModified = r.LastModified + newResp.LeaseDuration = r.LeaseDuration + newResp.LeaseState = r.LeaseState + newResp.LeaseStatus = r.LeaseStatus + newResp.LegalHold = r.LegalHold + newResp.Metadata = r.Metadata + newResp.ObjectReplicationPolicyID 
= r.ObjectReplicationPolicyID + newResp.ObjectReplicationRules = r.ObjectReplicationRules + newResp.RehydratePriority = r.RehydratePriority + newResp.RequestID = r.RequestID + newResp.TagCount = r.TagCount + newResp.Version = r.Version + newResp.VersionID = r.VersionID + if val := rawResponse.Header.Get("x-ms-owner"); val != "" { + newResp.Owner = &val + } + if val := rawResponse.Header.Get("x-ms-group"); val != "" { + newResp.Group = &val + } + if val := rawResponse.Header.Get("x-ms-permissions"); val != "" { + newResp.Permissions = &val + } + return newResp +} + +// SetMetadataResponse contains the response fields for the SetMetadata operation. +type SetMetadataResponse = blob.SetMetadataResponse + +// SetHTTPHeadersResponse contains the response from method Client.SetHTTPHeaders. +type SetHTTPHeadersResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// removes blob sequence number from response + +func FormatSetHTTPHeadersResponse(r *SetHTTPHeadersResponse, blobResp *blob.SetHTTPHeadersResponse) { + r.ClientRequestID = blobResp.ClientRequestID + r.Date = blobResp.Date + r.ETag = blobResp.ETag + r.LastModified = blobResp.LastModified + r.RequestID = blobResp.RequestID + r.Version = blobResp.Version +} diff --git a/sdk/storage/azdatalake/internal/shared/batch_transfer.go b/sdk/storage/azdatalake/internal/shared/batch_transfer.go new file mode 100644 index 000000000000..ec5541bfbb13 --- /dev/null +++ b/sdk/storage/azdatalake/internal/shared/batch_transfer.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "context" + "errors" +) + +// BatchTransferOptions identifies options used by doBatchTransfer. +type BatchTransferOptions struct { + TransferSize int64 + ChunkSize int64 + Concurrency uint16 + Operation func(ctx context.Context, offset int64, chunkSize int64) error + OperationName string +} + +// DoBatchTransfer helps to execute operations in a batch manner. +// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) +func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { + if o.ChunkSize == 0 { + return errors.New("ChunkSize cannot be 0") + } + + if o.Concurrency == 0 { + o.Concurrency = 5 // default concurrency + } + + // Prepare and do parallel operations. 
+ numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) + operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently + operationResponseChannel := make(chan error, numChunks) // Holds each response + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create the goroutines that process each operation (in parallel). + for g := uint16(0); g < o.Concurrency; g++ { + //grIndex := g + go func() { + for f := range operationChannel { + err := f() + operationResponseChannel <- err + } + }() + } + + // Add each chunk's operation to the channel. + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + curChunkSize := o.ChunkSize + + if chunkNum == numChunks-1 { // Last chunk + curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total + } + offset := int64(chunkNum) * o.ChunkSize + operationChannel <- func() error { + return o.Operation(ctx, offset, curChunkSize) + } + } + close(operationChannel) + + // Wait for the operations to complete. + var firstErr error = nil + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + responseError := <-operationResponseChannel + // record the first error (the original error which should cause the other chunks to fail with canceled context) + if responseError != nil && firstErr == nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + firstErr = responseError + } + } + return firstErr +} diff --git a/sdk/storage/azdatalake/internal/shared/bytes_writer.go b/sdk/storage/azdatalake/internal/shared/bytes_writer.go new file mode 100644 index 000000000000..8d4d35bdeffd --- /dev/null +++ b/sdk/storage/azdatalake/internal/shared/bytes_writer.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
// bytesWriter adapts a byte slice to the io.WriterAt interface.
type bytesWriter []byte

// NewBytesWriter wraps b so it can be written to at arbitrary offsets.
func NewBytesWriter(b []byte) bytesWriter {
	return b
}

// WriteAt copies b into the underlying slice starting at off.
// It fails when off lies outside the slice, and reports a short write
// when b does not fully fit.
func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) {
	if off < 0 || off >= int64(len(c)) {
		return 0, errors.New("offset value is out of range")
	}

	written := copy(c[off:], b)
	if written < len(b) {
		return written, errors.New("not enough space for all bytes")
	}
	return written, nil
}

// SectionWriter restricts sequential writes to a window of Count bytes
// beginning at Offset within the wrapped io.WriterAt.
type SectionWriter struct {
	Count    int64
	Offset   int64
	Position int64
	WriterAt io.WriterAt
}

// NewSectionWriter returns a SectionWriter over the [off, off+count) window of c.
func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter {
	return &SectionWriter{
		Count:    count,
		Offset:   off,
		WriterAt: c,
	}
}

// Write appends p at the current position inside the section, truncating at the
// section end. A truncated write returns the byte count written plus an error.
func (c *SectionWriter) Write(p []byte) (int, error) {
	remaining := c.Count - c.Position
	if remaining <= 0 {
		return 0, errors.New("end of section reached")
	}

	chunk := p
	if int64(len(chunk)) > remaining {
		chunk = chunk[:remaining]
	}

	n, err := c.WriterAt.WriteAt(chunk, c.Offset+c.Position)
	c.Position += int64(n)
	if err != nil {
		return n, err
	}
	if len(p) > n {
		return n, errors.New("not enough space for all bytes")
	}
	return n, nil
}
-0,0 +1,249 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "fmt" + "hash/crc64" + "io" + "net" + "net/url" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +const ( + TokenScope = "https://storage.azure.com/.default" +) + +const ( + ServiceClient = "azdatalake/service.Client" + FileSystemClient = "azdatalake/filesystem.Client" + DirectoryClient = "azdatalake/directory.Client" + FileClient = "azdatalake/file.Client" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderXmsDate = "x-ms-date" + HeaderContentLength = "Content-Length" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentType = "Content-Type" + HeaderContentMD5 = "Content-MD5" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderRange = "Range" + HeaderXmsVersion = "x-ms-version" + HeaderXmsRequestID = "x-ms-request-id" +) + +const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 + +var CRC64Table = crc64.MakeTable(crc64Polynomial) + +// CopyOptions returns a zero-value T if opts is nil. +// If opts is not nil, a copy is made and its address returned. +func CopyOptions[T any](opts *T) *T { + if opts == nil { + return new(T) + } + cp := *opts + return &cp +} + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. 
// errConnectionString is returned for blank or structurally invalid connection strings.
var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " +
	"should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" +
	"AccountKey=;EndpointSuffix=core.windows.net'")

// ParsedConnectionString holds the pieces extracted from a storage connection string.
type ParsedConnectionString struct {
	ServiceURL  string
	AccountName string
	AccountKey  string
}

// GetURLs derives the paired blob and dfs endpoints from url, whichever form it is in.
func GetURLs(url string) (string, string) {
	blobURL := strings.Replace(url, ".dfs.", ".blob.", 1)
	dfsURL := strings.Replace(url, ".blob.", ".dfs.", 1)
	return blobURL, dfsURL
}

// ParseConnectionString parses a storage connection string into its service URL
// and credentials. Either AccountName+AccountKey or a SharedAccessSignature must
// be present; the protocol and endpoint suffix fall back to https / core.windows.net.
func ParseConnectionString(connectionString string) (ParsedConnectionString, error) {
	const (
		defaultScheme = "https"
		defaultSuffix = "core.windows.net"
	)

	kv := make(map[string]string)
	for _, pair := range strings.Split(strings.TrimRight(connectionString, ";"), ";") {
		key, value, found := strings.Cut(pair, "=")
		if !found {
			return ParsedConnectionString{}, errConnectionString
		}
		kv[key] = value
	}

	protocol, ok := kv["DefaultEndpointsProtocol"]
	if !ok {
		protocol = defaultScheme
	}
	suffix, ok := kv["EndpointSuffix"]
	if !ok {
		suffix = defaultSuffix
	}

	blobEndpoint, hasBlobEndpoint := kv["BlobEndpoint"]
	accountName, hasAccountName := kv["AccountName"]

	var serviceURL string
	switch {
	case hasBlobEndpoint:
		serviceURL = blobEndpoint
	case hasAccountName:
		serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix)
	default:
		return ParsedConnectionString{}, errors.New("connection string needs either AccountName or BlobEndpoint")
	}
	if !strings.HasSuffix(serviceURL, "/") {
		// add a trailing slash to be consistent with the portal
		serviceURL += "/"
	}

	accountKey, hasAccountKey := kv["AccountKey"]
	sas, hasSAS := kv["SharedAccessSignature"]
	switch {
	case hasAccountName && hasAccountKey:
		return ParsedConnectionString{
			ServiceURL:  serviceURL,
			AccountName: accountName,
			AccountKey:  accountKey,
		}, nil
	case hasSAS:
		return ParsedConnectionString{
			ServiceURL: fmt.Sprintf("%v?%v", serviceURL, sas),
		}, nil
	default:
		return ParsedConnectionString{}, errors.New("connection string needs either AccountKey or SharedAccessSignature")
	}
}

// SerializeBlobTagsToStrPtr encodes tagsMap as a query-escaped "k=v&k=v" string,
// or nil when there are no tags.
func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
	if len(tagsMap) == 0 {
		return nil
	}
	pairs := make([]string, 0, len(tagsMap))
	for key, val := range tagsMap {
		pairs = append(pairs, url.QueryEscape(key)+"="+url.QueryEscape(val))
	}
	joined := strings.Join(pairs, "&")
	return &joined
}

// ValidateSeekableStreamAt0AndGetCount verifies body is a seekable stream
// positioned at 0 and returns its total length, leaving it repositioned at 0.
// A nil body is treated as an empty, validly positioned stream.
func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
	if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long
		return 0, nil
	}

	if err := validateSeekableStreamAt0(body); err != nil {
		return 0, err
	}

	count, err := body.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, errors.New("body stream must be seekable")
	}
	if _, err = body.Seek(0, io.SeekStart); err != nil {
		return 0, err
	}
	return count, nil
}

// validateSeekableStreamAt0 returns an error if body is not a valid seekable stream at position 0.
func validateSeekableStreamAt0(body io.ReadSeeker) error {
	if body == nil { // nil body's are "logically" seekable to 0
		return nil
	}
	pos, err := body.Seek(0, io.SeekCurrent)
	if err != nil {
		// Help detect programmer error
		return errors.New("body stream must be seekable")
	}
	if pos != 0 {
		return errors.New("body stream must be set to position 0")
	}
	return nil
}

// RangeToString renders an inclusive HTTP byte-range header value for
// [offset, offset+count).
func RangeToString(offset, count int64) string {
	return fmt.Sprintf("bytes=%d-%d", offset, offset+count-1)
}

// nopCloser adds a no-op Close to an io.ReadSeeker.
type nopCloser struct {
	io.ReadSeeker
}

// Close implements io.Closer without doing anything.
func (n nopCloser) Close() error {
	return nil
}
// IsIPEndpointStyle checks if URL's host is an IP, in which case the storage
// account endpoint is composed as:
// http(s)://IP(:port)/storageaccount/container/...
// As with url.URL's Host property, host may be either "host" or "host:port".
func IsIPEndpointStyle(host string) bool {
	if host == "" {
		return false
	}
	if hostOnly, _, err := net.SplitHostPort(host); err == nil {
		host = hostOnly
	}
	// SplitHostPort fails for a bracketed IPv6 literal that carries no port;
	// strip the '[' and ']' ourselves in that case.
	// For details about IPv6 URLs, see https://tools.ietf.org/html/rfc2732
	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
		host = host[1 : len(host)-1]
	}
	return net.ParseIP(host) != nil
}
+ +package testcommon + +import ( + "context" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service" + "github.com/stretchr/testify/require" + "testing" +) + +const ( + DefaultEndpointSuffix = "core.windows.net/" + DefaultBlobEndpointSuffix = "blob.core.windows.net/" + AccountNameEnvVar = "AZURE_STORAGE_ACCOUNT_NAME" + AccountKeyEnvVar = "AZURE_STORAGE_ACCOUNT_KEY" + DefaultEndpointSuffixEnvVar = "AZURE_STORAGE_ENDPOINT_SUFFIX" + SubscriptionID = "SUBSCRIPTION_ID" + ResourceGroupName = "RESOURCE_GROUP_NAME" +) + +const ( + FakeStorageAccount = "fakestorage" + FakeBlobStorageURL = "https://fakestorage.blob.core.windows.net" + FakeDFSStorageURL = "https://fakestorage.dfs.core.windows.net" + FakeToken = "faketoken" +) + +var BasicMetadata = map[string]*string{"Foo": to.Ptr("bar")} + +var ( + DatalakeContentType = "my_type" + DatalakeContentDisposition = "my_disposition" + DatalakeCacheControl = "control" + DatalakeContentLanguage = "my_language" + DatalakeContentEncoding = "my_encoding" +) + +var BasicHeaders = file.HTTPHeaders{ + ContentType: &DatalakeContentType, + ContentDisposition: &DatalakeContentDisposition, + CacheControl: &DatalakeCacheControl, + ContentMD5: nil, + ContentLanguage: &DatalakeContentLanguage, + ContentEncoding: &DatalakeContentEncoding, +} + +type TestAccountType string + +const ( + TestAccountDefault TestAccountType = "" + TestAccountSecondary TestAccountType = "SECONDARY_" + TestAccountPremium TestAccountType = "PREMIUM_" + TestAccountSoftDelete TestAccountType = "SOFT_DELETE_" + 
TestAccountDatalake TestAccountType = "DATALAKE_" + TestAccountImmutable TestAccountType = "IMMUTABLE_" +) + +func SetClientOptions(t *testing.T, opts *azcore.ClientOptions) { + opts.Logging.AllowedHeaders = append(opts.Logging.AllowedHeaders, "X-Request-Mismatch", "X-Request-Mismatch-Error") + + transport, err := recording.NewRecordingHTTPClient(t, nil) + require.NoError(t, err) + opts.Transport = transport +} + +func GetGenericAccountInfo(accountType TestAccountType) (string, string) { + if recording.GetRecordMode() == recording.PlaybackMode { + return FakeStorageAccount, "ZmFrZQ==" + } + accountNameEnvVar := string(accountType) + AccountNameEnvVar + accountKeyEnvVar := string(accountType) + AccountKeyEnvVar + accountName, _ := GetRequiredEnv(accountNameEnvVar) + accountKey, _ := GetRequiredEnv(accountKeyEnvVar) + return accountName, accountKey +} + +func GetGenericSharedKeyCredential(accountType TestAccountType) (*azdatalake.SharedKeyCredential, error) { + accountName, accountKey := GetGenericAccountInfo(accountType) + if accountName == "" || accountKey == "" { + return nil, errors.New(string(accountType) + AccountNameEnvVar + " and/or " + string(accountType) + AccountKeyEnvVar + " environment variables not specified.") + } + return azdatalake.NewSharedKeyCredential(accountName, accountKey) +} + +func GetServiceClient(t *testing.T, accountType TestAccountType, options *service.ClientOptions) (*service.Client, error) { + if options == nil { + options = &service.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + cred, err := GetGenericSharedKeyCredential(accountType) + if err != nil { + return nil, err + } + + serviceClient, err := service.NewClientWithSharedKeyCredential("https://"+cred.AccountName()+".dfs.core.windows.net/", cred, options) + + return serviceClient, err +} + +func GetFileSystemClient(fsName string, t *testing.T, accountType TestAccountType, options *filesystem.ClientOptions) (*filesystem.Client, error) { + if options == nil { 
+ options = &filesystem.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + cred, err := GetGenericSharedKeyCredential(accountType) + if err != nil { + return nil, err + } + + filesystemClient, err := filesystem.NewClientWithSharedKeyCredential("https://"+cred.AccountName()+".dfs.core.windows.net/"+fsName, cred, options) + + return filesystemClient, err +} + +func GetFileClient(fsName, fName string, t *testing.T, accountType TestAccountType, options *file.ClientOptions) (*file.Client, error) { + if options == nil { + options = &file.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + cred, err := GetGenericSharedKeyCredential(accountType) + if err != nil { + return nil, err + } + + fileClient, err := file.NewClientWithSharedKeyCredential("https://"+cred.AccountName()+".dfs.core.windows.net/"+fsName+"/"+fName, cred, options) + + return fileClient, err +} + +func CreateNewFile(ctx context.Context, _require *require.Assertions, fileName string, filesystemClient *filesystem.Client) *file.Client { + fileClient := filesystemClient.NewFileClient(fileName) + _, err := fileClient.Create(ctx, nil) + _require.Nil(err) + return fileClient +} + +func CreateNewDir(ctx context.Context, _require *require.Assertions, dirName string, filesystemClient *filesystem.Client) *directory.Client { + dirClient := filesystemClient.NewDirectoryClient(dirName) + _, err := dirClient.Create(ctx, nil) + _require.Nil(err) + return dirClient +} + +func GetDirClient(fsName, dirName string, t *testing.T, accountType TestAccountType, options *directory.ClientOptions) (*directory.Client, error) { + if options == nil { + options = &directory.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + cred, err := GetGenericSharedKeyCredential(accountType) + if err != nil { + return nil, err + } + + dirClient, err := directory.NewClientWithSharedKeyCredential("https://"+cred.AccountName()+".dfs.core.windows.net/"+fsName+"/"+dirName, cred, options) + + 
return dirClient, err +} + +func ServiceGetFileSystemClient(filesystemName string, s *service.Client) *filesystem.Client { + return s.NewFileSystemClient(filesystemName) +} + +func DeleteFileSystem(ctx context.Context, _require *require.Assertions, filesystemClient *filesystem.Client) { + _, err := filesystemClient.Delete(ctx, nil) + _require.Nil(err) +} + +func DeleteFile(ctx context.Context, _require *require.Assertions, fileClient *file.Client) { + _, err := fileClient.Delete(ctx, nil) + _require.Nil(err) +} + +func DeleteDir(ctx context.Context, _require *require.Assertions, dirClient *directory.Client) { + _, err := dirClient.Delete(ctx, nil) + _require.Nil(err) +} + +func GetGenericConnectionString(accountType TestAccountType) (*string, error) { + accountName, accountKey := GetGenericAccountInfo(accountType) + if accountName == "" || accountKey == "" { + return nil, errors.New(string(accountType) + AccountNameEnvVar + " and/or " + string(accountType) + AccountKeyEnvVar + " environment variables not specified.") + } + connectionString := fmt.Sprintf("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=core.windows.net/", + accountName, accountKey) + return &connectionString, nil +} + +func CreateNewFileSystem(ctx context.Context, _require *require.Assertions, filesystemName string, serviceClient *service.Client) *filesystem.Client { + fsClient := ServiceGetFileSystemClient(filesystemName, serviceClient) + + _, err := fsClient.Create(ctx, nil) + _require.Nil(err) + // _require.Equal(cResp.RawResponse.StatusCode, 201) + return fsClient +} + +func GetServiceClientFromConnectionString(t *testing.T, accountType TestAccountType, options *service.ClientOptions) (*service.Client, error) { + if options == nil { + options = &service.ClientOptions{} + } + SetClientOptions(t, &options.ClientOptions) + + transport, err := recording.NewRecordingHTTPClient(t, nil) + require.NoError(t, err) + options.Transport = transport + + cred, err := 
GetGenericConnectionString(accountType) + if err != nil { + return nil, err + } + svcClient, err := service.NewClientFromConnectionString(*cred, options) + return svcClient, err +} + +func GetServiceClientNoCredential(t *testing.T, sasUrl string, options *service.ClientOptions) (*service.Client, error) { + if options == nil { + options = &service.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + serviceClient, err := service.NewClientWithNoCredential(sasUrl, options) + + return serviceClient, err +} diff --git a/sdk/storage/azdatalake/internal/testcommon/common.go b/sdk/storage/azdatalake/internal/testcommon/common.go new file mode 100644 index 000000000000..8c7fb0923b7f --- /dev/null +++ b/sdk/storage/azdatalake/internal/testcommon/common.go @@ -0,0 +1,113 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package testcommon + +import ( + "bytes" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/stretchr/testify/require" + "io" + "os" + "strings" + "testing" + "time" +) + +const ( + FileSystemPrefix = "gofs" + FilePrefix = "gotestfile" + DirPrefix = "gotestdir" + DefaultData = "Godatalakedata" + InvalidHeaderErrorSubstring = "invalid header field" // error thrown by the http client +) + +func GenerateFileSystemName(testName string) string { + return FileSystemPrefix + GenerateEntityName(testName) +} + +func GenerateFileName(testName string) string { + return FilePrefix + GenerateEntityName(testName) +} + +func GenerateDirName(testName string) string { + return DirPrefix + GenerateEntityName(testName) +} + +func GenerateEntityName(testName string) string { + return 
strings.ReplaceAll(strings.ReplaceAll(strings.ToLower(testName), "/", ""), "test", "") +} + +func BeforeTest(t *testing.T, suite string, test string) { + const blobURLRegex = `https://\S+\.blob\.core\.windows\.net` + const dfsURLRegex = `https://\S+\.dfs\.core\.windows\.net` + const tokenRegex = `(?:Bearer\s).*` + + require.NoError(t, recording.AddURISanitizer(FakeBlobStorageURL, blobURLRegex, nil)) + require.NoError(t, recording.AddURISanitizer(FakeDFSStorageURL, dfsURLRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source", FakeBlobStorageURL, blobURLRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source", FakeDFSStorageURL, dfsURLRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source-authorization", FakeToken, tokenRegex, nil)) + // we freeze request IDs and timestamps to avoid creating noisy diffs + // NOTE: we can't freeze time stamps as that breaks some tests that use if-modified-since etc (maybe it can be fixed?) 
+ //testframework.AddHeaderRegexSanitizer("X-Ms-Date", "Wed, 10 Aug 2022 23:34:14 GMT", "", nil) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-request-id", "00000000-0000-0000-0000-000000000000", "", nil)) + //testframework.AddHeaderRegexSanitizer("Date", "Wed, 10 Aug 2022 23:34:14 GMT", "", nil) + // TODO: more freezing + //testframework.AddBodyRegexSanitizer("RequestId:00000000-0000-0000-0000-000000000000", `RequestId:\w{8}-\w{4}-\w{4}-\w{4}-\w{12}`, nil) + //testframework.AddBodyRegexSanitizer("Time:2022-08-11T00:21:56.4562741Z", `Time:\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:\.\d*)?Z`, nil) + require.NoError(t, recording.Start(t, "sdk/storage/azdatalake/testdata", nil)) +} + +func AfterTest(t *testing.T, suite string, test string) { + require.NoError(t, recording.Stop(t, nil)) +} + +// GetRequiredEnv gets an environment variable by name and returns an error if it is not found +func GetRequiredEnv(name string) (string, error) { + env, ok := os.LookupEnv(name) + if ok { + return env, nil + } else { + return "", errors.New("Required environment variable not set: " + name) + } +} + +func ValidateErrorCode(_require *require.Assertions, err error, code datalakeerror.StorageErrorCode) { + _require.NotNil(err) + var responseErr *azcore.ResponseError + errors.As(err, &responseErr) + if responseErr != nil { + _require.Equal(string(code), responseErr.ErrorCode) + } else { + _require.Contains(err.Error(), code) + } +} + +func GetRelativeTimeFromAnchor(anchorTime *time.Time, amount time.Duration) time.Time { + return anchorTime.Add(amount * time.Second) +} + +const random64BString string = "2SDgZj6RkKYzJpu04sweQek4uWHO8ndPnYlZ0tnFS61hjnFZ5IkvIGGY44eKABov" + +func GenerateData(sizeInBytes int) (io.ReadSeekCloser, []byte) { + data := make([]byte, sizeInBytes) + _len := len(random64BString) + if sizeInBytes > _len { + count := sizeInBytes / _len + if sizeInBytes%_len != 0 { + count = count + 1 + } + copy(data[:], strings.Repeat(random64BString, count)) + } else { + 
copy(data[:], random64BString) + } + return streaming.NopCloser(bytes.NewReader(data)), data +} diff --git a/sdk/storage/azdatalake/lease/client.go b/sdk/storage/azdatalake/lease/client.go new file mode 100644 index 000000000000..8e33db303761 --- /dev/null +++ b/sdk/storage/azdatalake/lease/client.go @@ -0,0 +1,91 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" +) + +// FileSystemClient provides lease functionality for the underlying filesystem client. +type FileSystemClient struct { + containerClient *lease.ContainerClient + leaseID *string +} + +// FileSystemClientOptions contains the optional values when creating a FileSystemClient. +type FileSystemClientOptions = lease.ContainerClientOptions + +// NewFileSystemClient creates a filesystem lease client for the provided filesystem client. 
+// - client - an instance of a filesystem client +// - options - client options; pass nil to accept the default values +func NewFileSystemClient(client *filesystem.Client, options *FileSystemClientOptions) (*FileSystemClient, error) { + _, _, containerClient := base.InnerClients((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(client)) + containerLeaseClient, err := lease.NewContainerClient(containerClient, options) + if err != nil { + return nil, exported.ConvertToDFSError(err) + } + return &FileSystemClient{ + containerClient: containerLeaseClient, + leaseID: containerLeaseClient.LeaseID(), + }, nil +} + +// LeaseID returns leaseID of the client. +func (c *FileSystemClient) LeaseID() *string { + return c.leaseID +} + +// AcquireLease acquires a lease on the filesystem for write and delete operations. +// The lease Duration must be between 15 and 60 seconds, or infinite (-1). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (c *FileSystemClient) AcquireLease(ctx context.Context, duration int32, o *FileSystemAcquireOptions) (FileSystemAcquireResponse, error) { + opts := o.format() + resp, err := c.containerClient.AcquireLease(ctx, duration, opts) + return resp, exported.ConvertToDFSError(err) +} + +// BreakLease breaks the filesystem's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) +// constant to break a fixed-Duration lease when it expires or an infinite lease immediately. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (c *FileSystemClient) BreakLease(ctx context.Context, o *FileSystemBreakOptions) (FileSystemBreakResponse, error) { + opts := o.format() + resp, err := c.containerClient.BreakLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) +} + +// ChangeLease changes the filesystem's lease ID. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (c *FileSystemClient) ChangeLease(ctx context.Context, proposedLeaseID string, o *FileSystemChangeOptions) (FileSystemChangeResponse, error) { + opts := o.format() + resp, err := c.containerClient.ChangeLease(ctx, proposedLeaseID, opts) + if err != nil { + return resp, exported.ConvertToDFSError(err) + } + c.leaseID = &proposedLeaseID + return resp, nil +} + +// RenewLease renews the filesystem's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (c *FileSystemClient) RenewLease(ctx context.Context, o *FileSystemRenewOptions) (FileSystemRenewResponse, error) { + opts := o.format() + resp, err := c.containerClient.RenewLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) +} + +// ReleaseLease releases the filesystem's previously-acquired lease. +func (c *FileSystemClient) ReleaseLease(ctx context.Context, o *FileSystemReleaseOptions) (FileSystemReleaseResponse, error) { + opts := o.format() + resp, err := c.containerClient.ReleaseLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) +} diff --git a/sdk/storage/azdatalake/lease/client_test.go b/sdk/storage/azdatalake/lease/client_test.go new file mode 100644 index 000000000000..9095690f1d15 --- /dev/null +++ b/sdk/storage/azdatalake/lease/client_test.go @@ -0,0 +1,560 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package lease_test + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/lease" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running lease Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + suite.Run(t, &LeaseUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + } +} + +func (s *LeaseRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *LeaseRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *LeaseUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (s *LeaseUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type LeaseRecordedTestsSuite struct { + suite.Suite +} + +type LeaseUnrecordedTestsSuite struct { + suite.Suite +} + +// var headersToIgnoreForLease = []string {"X-Ms-Proposed-Lease-Id", "X-Ms-Lease-Id"} +var proposedLeaseIDs = []*string{to.Ptr("c820a799-76d7-4ee2-6e15-546f19325c2c"), to.Ptr("326cc5e1-746e-4af8-4811-a50e6629a8ca")} + +func (s *LeaseRecordedTestsSuite) TestFilesystemAcquireLease() { + _require := require.New(s.T()) + testName := s.T().Name() + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), 
testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + filesystemLeaseClient, _ := lease.NewFileSystemClient(filesystemClient, &lease.FileSystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *filesystemLeaseClient.LeaseID()) + + _, err = filesystemLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFilesystemDeleteFilesystemWithoutLeaseId() { + _require := require.New(s.T()) + testName := s.T().Name() + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + filesystemLeaseClient, _ := lease.NewFileSystemClient(filesystemClient, &lease.FileSystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *filesystemLeaseClient.LeaseID()) + + _, err = filesystemClient.Delete(ctx, nil) + _require.NotNil(err) + + leaseID := filesystemLeaseClient.LeaseID() + _, err = filesystemClient.Delete(ctx, &filesystem.DeleteOptions{ + AccessConditions: 
&filesystem.AccessConditions{ + LeaseAccessConditions: &filesystem.LeaseAccessConditions{ + LeaseID: leaseID, + }, + }, + }) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFilesystemReleaseLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + filesystemLeaseClient, _ := lease.NewFileSystemClient(filesystemClient, &lease.FileSystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *filesystemLeaseClient.LeaseID()) + + _, err = filesystemClient.Delete(ctx, nil) + _require.NotNil(err) + + _, err = filesystemLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) + + _, err = filesystemClient.Delete(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFilesystemRenewLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + filesystemLeaseClient, _ := 
lease.NewFileSystemClient(filesystemClient, &lease.FileSystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *filesystemLeaseClient.LeaseID()) + + _, err = filesystemLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = filesystemLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFilesystemChangeLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + fsName := testcommon.GenerateFileSystemName(testName) + fsClient := testcommon.CreateNewFileSystem(context.Background(), _require, fsName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + fsLeaseClient, _ := lease.NewFileSystemClient(fsClient, &lease.FileSystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := fsLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *fsLeaseClient.LeaseID()) + + changeLeaseResp, err := fsLeaseClient.ChangeLease(ctx, *proposedLeaseIDs[1], nil) + _require.Nil(err) + _require.EqualValues(changeLeaseResp.LeaseID, proposedLeaseIDs[1]) + _require.EqualValues(fsLeaseClient.LeaseID(), proposedLeaseIDs[1]) + + _, err = fsLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = fsLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFileAcquireLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + 
//ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, err := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDeleteFileWithoutLeaseId() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, err := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + ctx := context.Background() + 
acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.NotNil(err) + + leaseID := fileLeaseClient.LeaseID() + _, err = fileClient.Delete(ctx, &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: leaseID, + }, + }, + }) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFileReleaseLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, _ := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.NotNil(err) + + _, err = fileLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) + + _, err = fileClient.Delete(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFileRenewLease() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := 
testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, _ := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = fileLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFileChangeLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, _ := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(15), nil) + 
_require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.Equal(*acquireLeaseResponse.LeaseID, *proposedLeaseIDs[0]) + + changeLeaseResp, err := fileLeaseClient.ChangeLease(ctx, *proposedLeaseIDs[1], nil) + _require.Nil(err) + _require.Equal(*changeLeaseResp.LeaseID, *proposedLeaseIDs[1]) + + _, err = fileLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = fileLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDirAcquireLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + dirName := testcommon.GenerateDirName(testName) + dirClient := testcommon.CreateNewDir(context.Background(), _require, dirName, filesystemClient) + dirLeaseClient, err := lease.NewPathClient(dirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + ctx := context.Background() + acquireLeaseResponse, err := dirLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, dirLeaseClient.LeaseID()) + + _, err = dirLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDeleteDirWithoutLeaseId() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := 
testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + dirName := testcommon.GenerateDirName(testName) + dirClient := testcommon.CreateNewDir(context.Background(), _require, dirName, filesystemClient) + dirLeaseClient, err := lease.NewPathClient(dirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + ctx := context.Background() + acquireLeaseResponse, err := dirLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, dirLeaseClient.LeaseID()) + + _, err = dirClient.Delete(ctx, nil) + _require.NotNil(err) + + leaseID := dirLeaseClient.LeaseID() + _, err = dirClient.Delete(ctx, &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: leaseID, + }, + }, + }) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDirReleaseLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + DirName := testcommon.GenerateDirName(testName) + DirClient := testcommon.CreateNewDir(context.Background(), _require, DirName, filesystemClient) + DirLeaseClient, _ := lease.NewPathClient(DirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, 
err := DirLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, DirLeaseClient.LeaseID()) + + _, err = DirClient.Delete(ctx, nil) + _require.NotNil(err) + + _, err = DirLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) + + _, err = DirClient.Delete(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDirRenewLease() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + dirName := testcommon.GenerateDirName(testName) + dirClient := testcommon.CreateNewDir(context.Background(), _require, dirName, filesystemClient) + dirLeaseClient, _ := lease.NewPathClient(dirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := dirLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, dirLeaseClient.LeaseID()) + + _, err = dirLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = dirLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDirChangeLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFileSystemName(testName) + filesystemClient := 
testcommon.CreateNewFileSystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, filesystemClient) + + dirName := testcommon.GenerateDirName(testName) + dirClient := testcommon.CreateNewDir(context.Background(), _require, dirName, filesystemClient) + dirLeaseClient, _ := lease.NewPathClient(dirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := dirLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.Equal(*acquireLeaseResponse.LeaseID, *proposedLeaseIDs[0]) + + changeLeaseResp, err := dirLeaseClient.ChangeLease(ctx, *proposedLeaseIDs[1], nil) + _require.Nil(err) + _require.Equal(*changeLeaseResp.LeaseID, *proposedLeaseIDs[1]) + + _, err = dirLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = dirLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} diff --git a/sdk/storage/azdatalake/lease/examples_test.go b/sdk/storage/azdatalake/lease/examples_test.go new file mode 100644 index 000000000000..adc847cb3050 --- /dev/null +++ b/sdk/storage/azdatalake/lease/examples_test.go @@ -0,0 +1,92 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "log" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/lease" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +// This example shows how to perform various lease operations on a filesystem. 
+// The same lease operations can be performed on individual files as well. +// A lease on a filesystem prevents it from being deleted by others, while a lease on a file +// protects it from both modifications and deletions. +func Example_lease_FileSystemClient_AcquireLease() { + // From the Azure portal, get your Storage account's name and account key. + accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + + // Use your Storage account's name and key to create a credential object; this is used to access your account. + credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + // Create an fsClient object that wraps the filesystem's URL and a default pipeline. + filesystemURL := fmt.Sprintf("https://%s.dfs.core.windows.net/myfs", accountName) + fsClient, err := filesystem.NewClientWithSharedKeyCredential(filesystemURL, credential, nil) + handleError(err) + + // Create a unique ID for the lease + // A lease ID can be any valid GUID string format. To generate UUIDs, consider the github.com/google/uuid package + leaseID := "36b1a876-cf98-4eb2-a5c3-6d68489658ff" + filesystemLeaseClient, err := lease.NewFileSystemClient(fsClient, + &lease.FileSystemClientOptions{LeaseID: to.Ptr(leaseID)}) + handleError(err) + + // Now acquire a lease on the filesystem. + // You can choose to pass an empty string for proposed ID so that the service automatically assigns one for you. + duration := int32(60) + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(context.TODO(), duration, nil) + handleError(err) + fmt.Println("The filesystem is leased for delete operations with lease ID", *acquireLeaseResponse.LeaseID) + + // The filesystem cannot be deleted without providing the lease ID. 
+ _, err = fsClient.Delete(context.TODO(), nil) + if err == nil { + log.Fatal("delete should have failed") + } + + fmt.Println("The filesystem cannot be deleted while there is an active lease") + + // We can release the lease now and the filesystem can be deleted. + _, err = filesystemLeaseClient.ReleaseLease(context.TODO(), nil) + handleError(err) + fmt.Println("The lease on the filesystem is now released") + + // AcquireLease a lease again to perform other operations. + // Duration is still 60 + acquireLeaseResponse, err = filesystemLeaseClient.AcquireLease(context.TODO(), duration, nil) + handleError(err) + fmt.Println("The filesystem is leased again with lease ID", *acquireLeaseResponse.LeaseID) + + // We can change the ID of an existing lease. + newLeaseID := "6b3e65e5-e1bb-4a3f-8b72-13e9bc9cd3bf" + changeLeaseResponse, err := filesystemLeaseClient.ChangeLease(context.TODO(), newLeaseID, nil) + handleError(err) + fmt.Println("The lease ID was changed to", *changeLeaseResponse.LeaseID) + + // The lease can be renewed. + renewLeaseResponse, err := filesystemLeaseClient.RenewLease(context.TODO(), nil) + handleError(err) + fmt.Println("The lease was renewed with the same ID", *renewLeaseResponse.LeaseID) + + // Finally, the lease can be broken, and we could prevent others from acquiring a lease for a period of time + _, err = filesystemLeaseClient.BreakLease(context.TODO(), &lease.FileSystemBreakOptions{BreakPeriod: to.Ptr(int32(60))}) + handleError(err) + fmt.Println("The lease was broken, and nobody can acquire a lease for 60 seconds") +} diff --git a/sdk/storage/azdatalake/lease/models.go b/sdk/storage/azdatalake/lease/models.go new file mode 100644 index 000000000000..fced59eab55c --- /dev/null +++ b/sdk/storage/azdatalake/lease/models.go @@ -0,0 +1,238 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package lease + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" +) + +// FileSystemAcquireOptions contains the optional parameters for the LeaseClient.AcquireLease method. +type FileSystemAcquireOptions struct { + // ModifiedAccessConditions contains optional parameters to access filesystem. + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *FileSystemAcquireOptions) format() *lease.ContainerAcquireOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } + return &lease.ContainerAcquireOptions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// FileSystemBreakOptions contains the optional parameters for the LeaseClient.BreakLease method. +type FileSystemBreakOptions struct { + // BreakPeriod is the proposed duration of seconds that the lease should continue before it is broken. + BreakPeriod *int32 + // ModifiedAccessConditions contains optional parameters to access filesystem. 
+ ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *FileSystemBreakOptions) format() *lease.ContainerBreakOptions { + opts := &lease.ContainerBreakOptions{} + if o == nil { + return opts + } + if o.ModifiedAccessConditions == nil { + opts.BreakPeriod = o.BreakPeriod + return opts + } + return &lease.ContainerBreakOptions{ + BreakPeriod: o.BreakPeriod, + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// FileSystemChangeOptions contains the optional parameters for the LeaseClient.ChangeLease method. +type FileSystemChangeOptions struct { + // ModifiedAccessConditions contains optional parameters to access filesystem. + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *FileSystemChangeOptions) format() *lease.ContainerChangeOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } + return &lease.ContainerChangeOptions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// FileSystemReleaseOptions contains the optional parameters for the LeaseClient.ReleaseLease method. +type FileSystemReleaseOptions struct { + // ModifiedAccessConditions contains optional parameters to access filesystem. 
+ ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *FileSystemReleaseOptions) format() *lease.ContainerReleaseOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } + return &lease.ContainerReleaseOptions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// FileSystemRenewOptions contains the optional parameters for the LeaseClient.RenewLease method. +type FileSystemRenewOptions struct { + // ModifiedAccessConditions contains optional parameters to access filesystem. + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *FileSystemRenewOptions) format() *lease.ContainerRenewOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } + return &lease.ContainerRenewOptions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// PathAcquireOptions contains the optional parameters for the LeaseClient.AcquireLease method. +type PathAcquireOptions struct { + // ModifiedAccessConditions contains optional parameters to access path. 
+ ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *PathAcquireOptions) format() *lease.BlobAcquireOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } + return &lease.BlobAcquireOptions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// PathBreakOptions contains the optional parameters for the LeaseClient.BreakLease method. +type PathBreakOptions struct { + // BreakPeriod is the proposed duration of seconds that the lease should continue before it is broken. + BreakPeriod *int32 + // ModifiedAccessConditions contains optional parameters to access path. + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *PathBreakOptions) format() *lease.BlobBreakOptions { + opts := &lease.BlobBreakOptions{} + if o == nil { + return opts + } + if o.ModifiedAccessConditions == nil { + opts.BreakPeriod = o.BreakPeriod + return opts + } + return &lease.BlobBreakOptions{ + BreakPeriod: o.BreakPeriod, + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// PathChangeOptions contains the optional parameters for the LeaseClient.ChangeLease method. +type PathChangeOptions struct { + // ModifiedAccessConditions contains optional parameters to access path. 
+ ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *PathChangeOptions) format() *lease.BlobChangeOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } + return &lease.BlobChangeOptions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// PathReleaseOptions contains the optional parameters for the LeaseClient.ReleaseLease method. +type PathReleaseOptions struct { + // ModifiedAccessConditions contains optional parameters to access path. + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *PathReleaseOptions) format() *lease.BlobReleaseOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } + return &lease.BlobReleaseOptions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// PathRenewOptions contains the optional parameters for the LeaseClient.RenewLease method. +type PathRenewOptions struct { + // ModifiedAccessConditions contains optional parameters to access path. 
+ ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *PathRenewOptions) format() *lease.BlobRenewOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } + return &lease.BlobRenewOptions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, + IfUnmodifiedSince: o.ModifiedAccessConditions.IfUnmodifiedSince, + IfMatch: o.ModifiedAccessConditions.IfMatch, + IfNoneMatch: o.ModifiedAccessConditions.IfNoneMatch, + }, + } +} + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.AccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions diff --git a/sdk/storage/azdatalake/lease/path_client.go b/sdk/storage/azdatalake/lease/path_client.go new file mode 100644 index 000000000000..aab03d04a7fe --- /dev/null +++ b/sdk/storage/azdatalake/lease/path_client.go @@ -0,0 +1,101 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package lease + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" +) + +// PathClient provides lease functionality for the underlying path client. +type PathClient struct { + blobClient *lease.BlobClient + leaseID *string +} + +// PathClientOptions contains the optional values when creating a PathClient. +type PathClientOptions = lease.BlobClientOptions + +// NewPathClient creates a path lease client for the provided path client. +// - client - an instance of a path client +// - options - client options; pass nil to accept the default values +func NewPathClient[T directory.Client | file.Client](client *T, options *PathClientOptions) (*PathClient, error) { + var blobClient *blockblob.Client + switch t := any(client).(type) { + case *directory.Client: + _, _, blobClient = base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(t)) + case *file.Client: + _, _, blobClient = base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(t)) + default: + return nil, fmt.Errorf("unhandled client type %T", client) + } + blobLeaseClient, err := lease.NewBlobClient(blobClient, options) + if err != nil { + return nil, exported.ConvertToDFSError(err) + } + return &PathClient{ + blobClient: blobLeaseClient, + leaseID: blobLeaseClient.LeaseID(), + }, nil +} + +// LeaseID returns leaseID of the client. 
+func (c *PathClient) LeaseID() *string { + return c.leaseID +} + +// AcquireLease acquires a lease on the path for write and delete operations. +// The lease Duration must be between 15 and 60 seconds, or infinite (-1). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (c *PathClient) AcquireLease(ctx context.Context, duration int32, o *PathAcquireOptions) (PathAcquireResponse, error) { + opts := o.format() + resp, err := c.blobClient.AcquireLease(ctx, duration, opts) + return resp, exported.ConvertToDFSError(err) +} + +// BreakLease breaks the path's previously-acquired lease. +func (c *PathClient) BreakLease(ctx context.Context, o *PathBreakOptions) (PathBreakResponse, error) { + opts := o.format() + resp, err := c.blobClient.BreakLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) +} + +// ChangeLease changes the path's lease ID. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (c *PathClient) ChangeLease(ctx context.Context, proposedID string, o *PathChangeOptions) (PathChangeResponse, error) { + opts := o.format() + resp, err := c.blobClient.ChangeLease(ctx, proposedID, opts) + if err != nil { + return resp, exported.ConvertToDFSError(err) + } + c.leaseID = &proposedID + return resp, nil +} + +// RenewLease renews the path's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (c *PathClient) RenewLease(ctx context.Context, o *PathRenewOptions) (PathRenewResponse, error) { + opts := o.format() + resp, err := c.blobClient.RenewLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) +} + +// ReleaseLease releases the path's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
+func (c *PathClient) ReleaseLease(ctx context.Context, o *PathReleaseOptions) (PathReleaseResponse, error) { + opts := o.format() + resp, err := c.blobClient.ReleaseLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) +} diff --git a/sdk/storage/azdatalake/lease/responses.go b/sdk/storage/azdatalake/lease/responses.go new file mode 100644 index 000000000000..55a174ccd6f8 --- /dev/null +++ b/sdk/storage/azdatalake/lease/responses.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" + +// FileSystemAcquireResponse contains the response from method FileSystemClient.AcquireLease. +type FileSystemAcquireResponse = lease.ContainerAcquireResponse + +// FileSystemBreakResponse contains the response from method FileSystemClient.BreakLease. +type FileSystemBreakResponse = lease.ContainerBreakResponse + +// FileSystemChangeResponse contains the response from method FileSystemClient.ChangeLease. +type FileSystemChangeResponse = lease.ContainerChangeResponse + +// FileSystemReleaseResponse contains the response from method FileSystemClient.ReleaseLease. +type FileSystemReleaseResponse = lease.ContainerReleaseResponse + +// FileSystemRenewResponse contains the response from method FileSystemClient.RenewLease. +type FileSystemRenewResponse = lease.ContainerRenewResponse + +// PathAcquireResponse contains the response from method PathClient.AcquireLease. +type PathAcquireResponse = lease.BlobAcquireResponse + +// PathBreakResponse contains the response from method PathClient.BreakLease. +type PathBreakResponse = lease.BlobBreakResponse + +// PathChangeResponse contains the response from method PathClient.ChangeLease. 
+type PathChangeResponse = lease.BlobChangeResponse + +// PathReleaseResponse contains the response from method PathClient.ReleaseLease. +type PathReleaseResponse = lease.BlobReleaseResponse + +// PathRenewResponse contains the response from method PathClient.RenewLease. +type PathRenewResponse = lease.BlobRenewResponse diff --git a/sdk/storage/azdatalake/log.go b/sdk/storage/azdatalake/log.go new file mode 100644 index 000000000000..a168fb01f8ba --- /dev/null +++ b/sdk/storage/azdatalake/log.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azdatalake + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" +) + +const ( + // EventUpload is used for logging events related to upload operation. + EventUpload = exported.EventUpload + + // EventError is used for logging errors. + EventError = exported.EventError +) diff --git a/sdk/storage/azdatalake/sas/account.go b/sdk/storage/azdatalake/sas/account.go new file mode 100644 index 000000000000..e5681c4ebdba --- /dev/null +++ b/sdk/storage/azdatalake/sas/account.go @@ -0,0 +1,226 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential = exported.UserDelegationCredential + +// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
+type AccountSignatureValues struct {
+	Version       string    `param:"sv"`  // If not specified, this defaults to Version
+	Protocol      Protocol  `param:"spr"` // See the SASProtocol* constants
+	StartTime     time.Time `param:"st"`  // Not specified if IsZero
+	ExpiryTime    time.Time `param:"se"`  // Not specified if IsZero
+	Permissions   string    `param:"sp"`  // Create by initializing AccountPermissions and then call String()
+	IPRange       IPRange   `param:"sip"`
+	ResourceTypes string    `param:"srt"` // Create by initializing AccountResourceTypes and then call String()
+}
+
+// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce
+// the proper SAS query parameters.
+func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" {
+		return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
+	}
+	if v.Version == "" {
+		v.Version = Version
+	}
+	perms, err := parseAccountPermissions(v.Permissions)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	resources, err := parseAccountResourceTypes(v.ResourceTypes)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.ResourceTypes = resources.String()
+
+	startTime, expiryTime := formatTimesForSigning(v.StartTime, v.ExpiryTime)
+
+	stringToSign := strings.Join([]string{
+		sharedKeyCredential.AccountName(),
+		v.Permissions,
+		"b", // blob service
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That is right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	p := QueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      "b", // will always be "b"
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field.
+type AccountPermissions struct {
+	Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, FilterByTags, Tag, SetImmutabilityPolicy bool
+}
+
+// String produces the SAS permissions string for an Azure Storage account.
+// Call this method to set AccountSignatureValues' Permissions field.
+func (p *AccountPermissions) String() string {
+	var buffer bytes.Buffer
+	if p.Read {
+		buffer.WriteRune('r')
+	}
+	if p.Write {
+		buffer.WriteRune('w')
+	}
+	if p.Delete {
+		buffer.WriteRune('d')
+	}
+	if p.DeletePreviousVersion {
+		buffer.WriteRune('x')
+	}
+	if p.PermanentDelete {
+		buffer.WriteRune('y')
+	}
+	if p.List {
+		buffer.WriteRune('l')
+	}
+	if p.Add {
+		buffer.WriteRune('a')
+	}
+	if p.Create {
+		buffer.WriteRune('c')
+	}
+	if p.Update {
+		buffer.WriteRune('u')
+	}
+	if p.Process {
+		buffer.WriteRune('p')
+	}
+	if p.FilterByTags {
+		buffer.WriteRune('f')
+	}
+	if p.Tag {
+		buffer.WriteRune('t')
+	}
+	if p.SetImmutabilityPolicy {
+		buffer.WriteRune('i')
+	}
+	return buffer.String()
+}
+
+// parseAccountPermissions initializes the AccountPermissions' fields from a string.
+func parseAccountPermissions(s string) (AccountPermissions, error) { + p := AccountPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'y': + p.PermanentDelete = true + case 'l': + p.List = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'u': + p.Update = true + case 'p': + p.Process = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true + case 'i': + p.SetImmutabilityPolicy = true + default: + return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) + } + } + return p, nil +} + +// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. +type AccountResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSignatureValues' ResourceTypes field. +func (rt *AccountResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. 
+func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) + } + } + return rt, nil +} diff --git a/sdk/storage/azdatalake/sas/account_test.go b/sdk/storage/azdatalake/sas/account_test.go new file mode 100644 index 000000000000..c995fe393b23 --- /dev/null +++ b/sdk/storage/azdatalake/sas/account_test.go @@ -0,0 +1,169 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestAccountPermissions_String(t *testing.T) { + testdata := []struct { + input AccountPermissions + expected string + }{ + {input: AccountPermissions{Read: true}, expected: "r"}, + {input: AccountPermissions{Write: true}, expected: "w"}, + {input: AccountPermissions{Delete: true}, expected: "d"}, + {input: AccountPermissions{DeletePreviousVersion: true}, expected: "x"}, + {input: AccountPermissions{PermanentDelete: true}, expected: "y"}, + {input: AccountPermissions{List: true}, expected: "l"}, + {input: AccountPermissions{Add: true}, expected: "a"}, + {input: AccountPermissions{Create: true}, expected: "c"}, + {input: AccountPermissions{Update: true}, expected: "u"}, + {input: AccountPermissions{Process: true}, expected: "p"}, + {input: AccountPermissions{Tag: true}, expected: "t"}, + {input: AccountPermissions{FilterByTags: true}, expected: "f"}, + {input: AccountPermissions{SetImmutabilityPolicy: true}, expected: "i"}, + {input: AccountPermissions{ + Read: true, + Write: true, + Delete: true, + DeletePreviousVersion: true, + PermanentDelete: true, + List: true, + Add: true, + Create: true, 
+ Update: true, + Process: true, + Tag: true, + FilterByTags: true, + SetImmutabilityPolicy: true, + }, expected: "rwdxylacupfti"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestAccountPermissions_Parse(t *testing.T) { + testdata := []struct { + input string + expected AccountPermissions + }{ + {expected: AccountPermissions{Read: true}, input: "r"}, + {expected: AccountPermissions{Write: true}, input: "w"}, + {expected: AccountPermissions{Delete: true}, input: "d"}, + {expected: AccountPermissions{DeletePreviousVersion: true}, input: "x"}, + {expected: AccountPermissions{PermanentDelete: true}, input: "y"}, + {expected: AccountPermissions{List: true}, input: "l"}, + {expected: AccountPermissions{Add: true}, input: "a"}, + {expected: AccountPermissions{Create: true}, input: "c"}, + {expected: AccountPermissions{Update: true}, input: "u"}, + {expected: AccountPermissions{Process: true}, input: "p"}, + {expected: AccountPermissions{Tag: true}, input: "t"}, + {expected: AccountPermissions{FilterByTags: true}, input: "f"}, + {expected: AccountPermissions{SetImmutabilityPolicy: true}, input: "i"}, + {expected: AccountPermissions{ + Read: true, + Write: true, + Delete: true, + DeletePreviousVersion: true, + PermanentDelete: true, + List: true, + Add: true, + Create: true, + Update: true, + Process: true, + Tag: true, + FilterByTags: true, + SetImmutabilityPolicy: true, + }, input: "rwdxylacupfti"}, + {expected: AccountPermissions{ + Read: true, + Write: true, + Delete: true, + DeletePreviousVersion: true, + PermanentDelete: true, + List: true, + Add: true, + Create: true, + Update: true, + Process: true, + Tag: true, + FilterByTags: true, + SetImmutabilityPolicy: true, + }, input: "trwlapdixfycu"}, + } + for _, c := range testdata { + permissions, err := parseAccountPermissions(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestAccountPermissions_ParseNegative(t *testing.T) 
{ + _, err := parseAccountPermissions("trwlapdixfycuz") // Here 'z' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "122") +} + +func TestAccountResourceTypes_String(t *testing.T) { + testdata := []struct { + input AccountResourceTypes + expected string + }{ + {input: AccountResourceTypes{Service: true}, expected: "s"}, + {input: AccountResourceTypes{Container: true}, expected: "c"}, + {input: AccountResourceTypes{Object: true}, expected: "o"}, + {input: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, expected: "sco"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestAccountResourceTypes_Parse(t *testing.T) { + testdata := []struct { + input string + expected AccountResourceTypes + }{ + {expected: AccountResourceTypes{Service: true}, input: "s"}, + {expected: AccountResourceTypes{Container: true}, input: "c"}, + {expected: AccountResourceTypes{Object: true}, input: "o"}, + {expected: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, input: "sco"}, + {expected: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, input: "osc"}, + } + for _, c := range testdata { + permissions, err := parseAccountResourceTypes(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestAccountResourceTypes_ParseNegative(t *testing.T) { + _, err := parseAccountResourceTypes("scoz") // Here 'z' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "122") +} + +// TODO: Sign With Shared Key +// Negative Case +// Version not provided +// SignWithSharedKey tests diff --git a/sdk/storage/azdatalake/sas/query_params.go b/sdk/storage/azdatalake/sas/query_params.go new file mode 100644 index 000000000000..7806687833c0 --- /dev/null +++ b/sdk/storage/azdatalake/sas/query_params.go @@ -0,0 +1,506 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "errors" + "net" + "net/url" + "strings" + "time" +) + +// TimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. +const ( + TimeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601 +) + +var ( + // Version is the default version encoded in the SAS token. + Version = "2020-02-10" +) + +// TimeFormats ISO 8601 format. +// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. +var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01-02T15:04Z", "2006-01-02"} + +// Protocol indicates the http/https. +type Protocol string + +const ( + // ProtocolHTTPS can be specified for a SAS protocol. + ProtocolHTTPS Protocol = "https" + + // ProtocolHTTPSandHTTP can be specified for a SAS protocol. + ProtocolHTTPSandHTTP Protocol = "https,http" +) + +// FormatTimesForSigning converts a time.Time to a snapshotTimeFormat string suitable for a +// Field's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). +func formatTimesForSigning(startTime, expiryTime time.Time) (string, string) { + ss := "" + if !startTime.IsZero() { + ss = formatTimeWithDefaultFormat(&startTime) + } + se := "" + if !expiryTime.IsZero() { + se = formatTimeWithDefaultFormat(&expiryTime) + } + return ss, se +} + +// formatTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ". +func formatTimeWithDefaultFormat(t *time.Time) string { + return formatTime(t, TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used +} + +// formatTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default. 
+func formatTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseTime tries to parse a SAS time string.
+func parseTime(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range timeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("fail to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+	Start net.IP // Not specified if length = 0
+	End   net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string {
+	if len(ipr.Start) == 0 {
+		return ""
+	}
+	start := ipr.Start.String()
+	if len(ipr.End) == 0 {
+		return start
+	}
+	return start + "-" + ipr.End.String()
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewQueryParameters(). You add the components
+// to a query parameter map by calling AddToValues().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
+type QueryParameters struct {
+	// All members are immutable or values so copies of this struct are goroutine-safe.
+ version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOID string `param:"skoid"` + signedTID string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + authorizedObjectID string `param:"saoid"` + unauthorizedObjectID string `param:"suoid"` + correlationID string `param:"scid"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// AuthorizedObjectID returns authorizedObjectID. +func (p *QueryParameters) AuthorizedObjectID() string { + return p.authorizedObjectID +} + +// UnauthorizedObjectID returns unauthorizedObjectID. +func (p *QueryParameters) UnauthorizedObjectID() string { + return p.unauthorizedObjectID +} + +// SignedCorrelationID returns signedCorrelationID. +func (p *QueryParameters) SignedCorrelationID() string { + return p.correlationID +} + +// SignedOID returns signedOID. +func (p *QueryParameters) SignedOID() string { + return p.signedOID +} + +// SignedTID returns signedTID. +func (p *QueryParameters) SignedTID() string { + return p.signedTID +} + +// SignedStart returns signedStart. +func (p *QueryParameters) SignedStart() time.Time { + return p.signedStart +} + +// SignedExpiry returns signedExpiry. 
+func (p *QueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +// SignedService returns signedService. +func (p *QueryParameters) SignedService() string { + return p.signedService +} + +// SignedVersion returns signedVersion. +func (p *QueryParameters) SignedVersion() string { + return p.signedVersion +} + +// SnapshotTime returns snapshotTime. +func (p *QueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +// Version returns version. +func (p *QueryParameters) Version() string { + return p.version +} + +// Services returns services. +func (p *QueryParameters) Services() string { + return p.services +} + +// ResourceTypes returns resourceTypes. +func (p *QueryParameters) ResourceTypes() string { + return p.resourceTypes +} + +// Protocol returns protocol. +func (p *QueryParameters) Protocol() Protocol { + return p.protocol +} + +// StartTime returns startTime. +func (p *QueryParameters) StartTime() time.Time { + return p.startTime +} + +// ExpiryTime returns expiryTime. +func (p *QueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +// IPRange returns ipRange. +func (p *QueryParameters) IPRange() IPRange { + return p.ipRange +} + +// Identifier returns identifier. +func (p *QueryParameters) Identifier() string { + return p.identifier +} + +// Resource returns resource. +func (p *QueryParameters) Resource() string { + return p.resource +} + +// Permissions returns permissions. +func (p *QueryParameters) Permissions() string { + return p.permissions +} + +// Signature returns signature. +func (p *QueryParameters) Signature() string { + return p.signature +} + +// CacheControl returns cacheControl. +func (p *QueryParameters) CacheControl() string { + return p.cacheControl +} + +// ContentDisposition returns contentDisposition. +func (p *QueryParameters) ContentDisposition() string { + return p.contentDisposition +} + +// ContentEncoding returns contentEncoding. 
+func (p *QueryParameters) ContentEncoding() string { + return p.contentEncoding +} + +// ContentLanguage returns contentLanguage. +func (p *QueryParameters) ContentLanguage() string { + return p.contentLanguage +} + +// ContentType returns contentType. +func (p *QueryParameters) ContentType() string { + return p.contentType +} + +// SignedDirectoryDepth returns signedDirectoryDepth. +func (p *QueryParameters) SignedDirectoryDepth() string { + return p.signedDirectoryDepth +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. +func (p *QueryParameters) Encode() string { + v := url.Values{} + + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signedOID != "" { + v.Add("skoid", p.signedOID) + v.Add("sktid", p.signedTID) + v.Add("skt", p.signedStart.Format(TimeFormat)) + v.Add("ske", p.signedExpiry.Format(TimeFormat)) + v.Add("sks", p.signedService) + v.Add("skv", p.signedVersion) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + if p.signedDirectoryDepth != "" { + v.Add("sdd", 
p.signedDirectoryDepth) + } + if p.authorizedObjectID != "" { + v.Add("saoid", p.authorizedObjectID) + } + if p.unauthorizedObjectID != "" { + v.Add("suoid", p.unauthorizedObjectID) + } + if p.correlationID != "" { + v.Add("scid", p.correlationID) + } + + return v.Encode() +} + +// NewQueryParameters creates and initializes a QueryParameters object based on the +// query parameter map's passed-in values. If a key is unrecognized, it is ignored +func NewQueryParameters(values url.Values) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOID = val + case "sktid": + p.signedTID = val + case "skt": + p.signedStart, _ = time.Parse(TimeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(TimeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.authorizedObjectID = val + case "suoid": + p.unauthorizedObjectID = val + case "scid": + p.correlationID = val + default: + continue // query param didn't get recognized + } + } + return p +} + +// newQueryParameters 
creates and initializes a QueryParameters object based on the +// query parameter map's passed-in values. If deleteSASParametersFromValues is true, +// all SAS-related query parameters are removed from the passed-in map. If +// deleteSASParametersFromValues is false, the map passed-in map is unaltered. +func newQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + //case "snapshot": + // p.snapshotTime, _ = time.Parse(exported.SnapshotTimeFormat, val) + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOID = val + case "sktid": + p.signedTID = val + case "skt": + p.signedStart, _ = time.Parse(TimeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(TimeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.authorizedObjectID = val + case "suoid": + p.unauthorizedObjectID = val + case "scid": + p.correlationID = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && 
deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} diff --git a/sdk/storage/azdatalake/sas/query_params_test.go b/sdk/storage/azdatalake/sas/query_params_test.go new file mode 100644 index 000000000000..1e50647a2ca3 --- /dev/null +++ b/sdk/storage/azdatalake/sas/query_params_test.go @@ -0,0 +1,231 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "fmt" + "net" + "net/url" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestFormatTimesForSigning(t *testing.T) { + testdata := []struct { + inputStart time.Time + inputEnd time.Time + inputSnapshot time.Time + expectedStart string + expectedEnd string + expectedSnapshot string + }{ + {expectedStart: "", expectedEnd: "", expectedSnapshot: ""}, + {inputStart: time.Date(1955, 6, 25, 22, 15, 56, 345456, time.UTC), expectedStart: "1955-06-25T22:15:56Z", expectedEnd: "", expectedSnapshot: ""}, + {inputEnd: time.Date(2023, 4, 5, 8, 50, 27, 4500, time.UTC), expectedStart: "", expectedEnd: "2023-04-05T08:50:27Z", expectedSnapshot: ""}, + {inputSnapshot: time.Date(2021, 1, 5, 22, 15, 33, 1234879, time.UTC), expectedStart: "", expectedEnd: "", expectedSnapshot: "2021-01-05T22:15:33.0012348Z"}, + { + inputStart: time.Date(1955, 6, 25, 22, 15, 56, 345456, time.UTC), + inputEnd: time.Date(2023, 4, 5, 8, 50, 27, 4500, time.UTC), + inputSnapshot: time.Date(2021, 1, 5, 22, 15, 33, 1234879, time.UTC), + expectedStart: "1955-06-25T22:15:56Z", + expectedEnd: "2023-04-05T08:50:27Z", + expectedSnapshot: "2021-01-05T22:15:33.0012348Z", + }, + } + for _, c := range testdata { + start, end := formatTimesForSigning(c.inputStart, c.inputEnd) + require.Equal(t, c.expectedStart, start) + require.Equal(t, c.expectedEnd, end) + } +} + +func TestFormatTimeWithDefaultFormat(t *testing.T) { + testdata := []struct { + 
input time.Time + expectedTime string + }{ + {input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), expectedTime: "1955-04-05T08:50:27Z"}, + {input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), expectedTime: "1917-03-09T16:22:56Z"}, + {input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), expectedTime: "2021-01-05T22:15:00Z"}, + {input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), expectedTime: "2023-06-25T00:00:00Z"}, + } + for _, c := range testdata { + formattedTime := formatTimeWithDefaultFormat(&c.input) + require.Equal(t, c.expectedTime, formattedTime) + } +} + +func TestFormatTime(t *testing.T) { + testdata := []struct { + input time.Time + format string + expectedTime string + }{ + {input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), format: "2006-01-02T15:04:05.0000000Z", expectedTime: "1955-04-05T08:50:27.0000045Z"}, + {input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), format: "", expectedTime: "1955-04-05T08:50:27Z"}, + {input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), format: "2006-01-02T15:04:05Z", expectedTime: "1917-03-09T16:22:56Z"}, + {input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), format: "", expectedTime: "1917-03-09T16:22:56Z"}, + {input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), format: "2006-01-02T15:04Z", expectedTime: "2021-01-05T22:15Z"}, + {input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), format: "", expectedTime: "2021-01-05T22:15:00Z"}, + {input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), format: "2006-01-02", expectedTime: "2023-06-25"}, + {input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), format: "", expectedTime: "2023-06-25T00:00:00Z"}, + } + for _, c := range testdata { + formattedTime := formatTime(&c.input, c.format) + require.Equal(t, c.expectedTime, formattedTime) + } +} + +func TestParseTime(t *testing.T) { + testdata := []struct { + input string + expectedTime time.Time + expectedFormat string + }{ + {input: "1955-04-05T08:50:27.0000045Z", expectedTime: time.Date(1955, 4, 5, 8, 
50, 27, 4500, time.UTC), expectedFormat: "2006-01-02T15:04:05.0000000Z"}, + {input: "1917-03-09T16:22:56Z", expectedTime: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), expectedFormat: "2006-01-02T15:04:05Z"}, + {input: "2021-01-05T22:15Z", expectedTime: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), expectedFormat: "2006-01-02T15:04Z"}, + {input: "2023-06-25", expectedTime: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), expectedFormat: "2006-01-02"}, + } + for _, c := range testdata { + parsedTime, format, err := parseTime(c.input) + require.Nil(t, err) + require.Equal(t, c.expectedTime, parsedTime) + require.Equal(t, c.expectedFormat, format) + } +} + +func TestParseTimeNegative(t *testing.T) { + _, _, err := parseTime("notatime") + require.Error(t, err, "fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details") +} + +func TestIPRange_String(t *testing.T) { + testdata := []struct { + inputStart net.IP + inputEnd net.IP + expected string + }{ + {expected: ""}, + {inputStart: net.IPv4(10, 255, 0, 0), expected: "10.255.0.0"}, + {inputStart: net.IPv4(10, 255, 0, 0), inputEnd: net.IPv4(10, 255, 0, 50), expected: "10.255.0.0-10.255.0.50"}, + } + for _, c := range testdata { + var ipRange IPRange + if c.inputStart != nil { + ipRange.Start = c.inputStart + } + if c.inputEnd != nil { + ipRange.End = c.inputEnd + } + require.Equal(t, c.expected, ipRange.String()) + } +} + +func TestSAS(t *testing.T) { + // Note: This is a totally invalid fake SAS, this is just testing our ability to parse different query parameters on a SAS + const sas = 
"sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https,http&si=myIdentifier&ss=bf&srt=s&rscc=cc&rscd=cd&rsce=ce&rscl=cl&rsct=ct&skoid=oid&sktid=tid&skt=2111-01-09T01:42:34.936Z&ske=2222-03-09T01:42:34.936Z&sks=s&skv=v&sdd=3&saoid=oid&suoid=oid&scid=cid&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D" + _url := fmt.Sprintf("https://teststorageaccount.blob.core.windows.net/testcontainer/testpath?%s", sas) + _uri, err := url.Parse(_url) + require.NoError(t, err) + sasQueryParams := newQueryParameters(_uri.Query(), true) + validateSAS(t, sas, sasQueryParams) +} + +func validateSAS(t *testing.T, sas string, parameters QueryParameters) { + sasCompMap := make(map[string]string) + for _, sasComp := range strings.Split(sas, "&") { + comp := strings.Split(sasComp, "=") + sasCompMap[comp[0]] = comp[1] + } + + require.Equal(t, parameters.Version(), sasCompMap["sv"]) + require.Equal(t, parameters.Services(), sasCompMap["ss"]) + require.Equal(t, parameters.ResourceTypes(), sasCompMap["srt"]) + require.Equal(t, string(parameters.Protocol()), sasCompMap["spr"]) + if _, ok := sasCompMap["st"]; ok { + startTime, _, err := parseTime(sasCompMap["st"]) + require.NoError(t, err) + require.Equal(t, parameters.StartTime(), startTime) + } + if _, ok := sasCompMap["se"]; ok { + endTime, _, err := parseTime(sasCompMap["se"]) + require.NoError(t, err) + require.Equal(t, parameters.ExpiryTime(), endTime) + } + + if _, ok := sasCompMap["snapshot"]; ok { + snapshotTime, _, err := parseTime(sasCompMap["snapshot"]) + require.NoError(t, err) + require.Equal(t, parameters.SnapshotTime(), snapshotTime) + } + ipRange := parameters.IPRange() + require.Equal(t, ipRange.String(), sasCompMap["sip"]) + require.Equal(t, parameters.Identifier(), sasCompMap["si"]) + require.Equal(t, parameters.Resource(), sasCompMap["sr"]) + require.Equal(t, parameters.Permissions(), sasCompMap["sp"]) + + sign, err := url.QueryUnescape(sasCompMap["sig"]) + 
require.NoError(t, err) + + require.Equal(t, parameters.Signature(), sign) + require.Equal(t, parameters.CacheControl(), sasCompMap["rscc"]) + require.Equal(t, parameters.ContentDisposition(), sasCompMap["rscd"]) + require.Equal(t, parameters.ContentEncoding(), sasCompMap["rsce"]) + require.Equal(t, parameters.ContentLanguage(), sasCompMap["rscl"]) + require.Equal(t, parameters.ContentType(), sasCompMap["rsct"]) + require.Equal(t, parameters.SignedOID(), sasCompMap["skoid"]) + require.Equal(t, parameters.SignedTID(), sasCompMap["sktid"]) + + if _, ok := sasCompMap["skt"]; ok { + signedStart, _, err := parseTime(sasCompMap["skt"]) + require.NoError(t, err) + require.Equal(t, parameters.SignedStart(), signedStart) + } + require.Equal(t, parameters.SignedService(), sasCompMap["sks"]) + + if _, ok := sasCompMap["ske"]; ok { + signedExpiry, _, err := parseTime(sasCompMap["ske"]) + require.NoError(t, err) + require.Equal(t, parameters.SignedExpiry(), signedExpiry) + } + + require.Equal(t, parameters.SignedVersion(), sasCompMap["skv"]) + require.Equal(t, parameters.SignedDirectoryDepth(), sasCompMap["sdd"]) + require.Equal(t, parameters.AuthorizedObjectID(), sasCompMap["saoid"]) + require.Equal(t, parameters.UnauthorizedObjectID(), sasCompMap["suoid"]) + require.Equal(t, parameters.SignedCorrelationID(), sasCompMap["scid"]) +} + +func TestSASInvalidQueryParameter(t *testing.T) { + // Signature is invalid below + const sas = "sv=2019-12-12&signature=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&sr=b" + _url := fmt.Sprintf("https://teststorageaccount.blob.core.windows.net/testcontainer/testpath?%s", sas) + _uri, err := url.Parse(_url) + require.NoError(t, err) + newQueryParameters(_uri.Query(), true) + // NewQueryParameters should not delete signature + require.Contains(t, _uri.Query(), "signature") +} + +func TestEncode(t *testing.T) { + // Note: This is a totally invalid fake SAS, this is just testing our ability to parse different query parameters on a SAS + expected 
:= "rscc=cc&rscd=cd&rsce=ce&rscl=cl&rsct=ct&saoid=oid&scid=cid&sdd=3&se=2222-03-09T01%3A42%3A34Z&si=myIdentifier&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&sip=168.1.5.60-168.1.5.70&ske=2222-03-09T01%3A42%3A34Z&skoid=oid&sks=s&skt=2111-01-09T01%3A42%3A34Z&sktid=tid&skv=v&sp=rw&spr=https%2Chttp&sr=b&srt=sco&ss=bf&st=2111-01-09T01%3A42%3A34Z&suoid=oid&sv=2019-12-12" + randomOrder := "sdd=3&scid=cid&se=2222-03-09T01:42:34.936Z&rsce=ce&ss=bf&skoid=oid&si=myIdentifier&ske=2222-03-09T01:42:34.936Z&saoid=oid&sip=168.1.5.60-168.1.5.70&rscc=cc&srt=sco&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&rsct=ct&skt=2111-01-09T01:42:34.936Z&rscl=cl&suoid=oid&sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&rscd=cd&sp=rw&sktid=tid&spr=https,http&sks=s&skv=v" + testdata := []string{expected, randomOrder} + + for _, sas := range testdata { + _url := fmt.Sprintf("https://teststorageaccount.blob.core.windows.net/testcontainer/testpath?%s", sas) + _uri, err := url.Parse(_url) + require.NoError(t, err) + queryParams := newQueryParameters(_uri.Query(), true) + require.Equal(t, expected, queryParams.Encode()) + } +} diff --git a/sdk/storage/azdatalake/sas/service.go b/sdk/storage/azdatalake/sas/service.go new file mode 100644 index 000000000000..22784206a667 --- /dev/null +++ b/sdk/storage/azdatalake/sas/service.go @@ -0,0 +1,457 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" +) + +// DatalakeSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage filesystem or path. 
+// For more information on creating service sas, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas +// For more information on creating user delegation sas, see https://docs.microsoft.com/rest/api/storageservices/create-user-delegation-sas +type DatalakeSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to Version + Protocol Protocol `param:"spr"` // See the Protocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing FileSystemPermissions, FilePermissions or DirectoryPermissions and then call String() + IPRange IPRange `param:"sip"` + Identifier string `param:"si"` + FileSystemName string + // Use "" to create a FileSystem SAS + // DirectoryPath will set this to "" if it is passed + FilePath string + // Not nil for a directory SAS (ie sr=d) + // Use "" to create a FileSystem SAS + DirectoryPath string + CacheControl string // rscc + ContentDisposition string // rscd + ContentEncoding string // rsce + ContentLanguage string // rscl + ContentType string // rsct + AuthorizedObjectID string // saoid + UnauthorizedObjectID string // suoid + CorrelationID string // scid +} + +//TODO: add snapshot and versioning support in the future + +func getDirectoryDepth(path string) string { + if path == "" { + return "" + } + return fmt.Sprint(strings.Count(path, "/") + 1) +} + +// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. 
+func (v DatalakeSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + if v.Identifier == "" && v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") + } + + //Make sure the permission characters are in the correct order + perms, err := parsePathPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + resource := "c" + if v.DirectoryPath != "" { + resource = "d" + v.FilePath = "" + } else if v.FilePath == "" { + // do nothing + } else { + resource = "b" + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime := formatTimesForSigning(v.StartTime, v.ExpiryTime) + + signedIdentifier := v.Identifier + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.FileSystemName, v.FilePath, v.DirectoryPath), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + "", //snapshot not supported + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + 
signedDirectoryDepth: getDirectoryDepth(v.DirectoryPath), + authorizedObjectID: v.AuthorizedObjectID, + unauthorizedObjectID: v.UnauthorizedObjectID, + correlationID: v.CorrelationID, + // Calculated SAS signature + signature: signature, + identifier: signedIdentifier, + } + + return p, nil +} + +// SignWithUserDelegation uses an account's UserDelegationCredential to sign this signature values to produce the proper SAS query parameters. +func (v DatalakeSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { + if userDelegationCredential == nil { + return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") + } + + if v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("user delegation SAS is missing at least one of these: ExpiryTime or Permissions") + } + + // Parse the resource + resource := "c" + if v.DirectoryPath != "" { + resource = "d" + v.FilePath = "" + } else if v.FilePath == "" { + // do nothing + } else { + resource = "b" + } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseFileSystemPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parsePathPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime := formatTimesForSigning(v.StartTime, v.ExpiryTime) + + udk := exported.GetUDKParams(userDelegationCredential) + + udkStart, udkExpiry := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry) + + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(exported.GetAccountName(userDelegationCredential), v.FileSystemName, v.FilePath, v.DirectoryPath), + *udk.SignedOID, + *udk.SignedTID, + udkStart, + 
+// getCanonicalName computes the canonical name for a filesystem or path resource for SAS signing.
+// The canonical name uses the "/blob/" prefix (see the examples inside the function), matching what the service signs against.
+// FileSystemPermissions type simplifies creating the permissions string for an Azure Storage filesystem SAS.
+// Initialize an instance of this type and then call its String method to set DatalakeSignatureValues' Permissions field.
+// FilePermissions type simplifies creating the permissions string for an Azure Storage file (path) SAS.
+// Initialize an instance of this type and then call its String method to set DatalakeSignatureValues' Permissions field.
+// Call this method to set DatalakeSignatureValues' Permissions field.
+ +package sas + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestFileSystemPermissions_String(t *testing.T) { + testdata := []struct { + input FileSystemPermissions + expected string + }{ + {input: FileSystemPermissions{Read: true}, expected: "r"}, + {input: FileSystemPermissions{Add: true}, expected: "a"}, + {input: FileSystemPermissions{Create: true}, expected: "c"}, + {input: FileSystemPermissions{Write: true}, expected: "w"}, + {input: FileSystemPermissions{Delete: true}, expected: "d"}, + {input: FileSystemPermissions{List: true}, expected: "l"}, + {input: FileSystemPermissions{Move: true}, expected: "m"}, + {input: FileSystemPermissions{Execute: true}, expected: "e"}, + {input: FileSystemPermissions{ModifyOwnership: true}, expected: "o"}, + {input: FileSystemPermissions{ModifyPermissions: true}, expected: "p"}, + {input: FileSystemPermissions{ + Read: true, + Add: true, + Create: true, + Write: true, + Delete: true, + List: true, + Move: true, + Execute: true, + ModifyOwnership: true, + ModifyPermissions: true, + }, expected: "racwdlmeop"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestFileSystemPermissions_Parse(t *testing.T) { + testdata := []struct { + input string + expected FileSystemPermissions + }{ + {expected: FileSystemPermissions{Read: true}, input: "r"}, + {expected: FileSystemPermissions{Add: true}, input: "a"}, + {expected: FileSystemPermissions{Create: true}, input: "c"}, + {expected: FileSystemPermissions{Write: true}, input: "w"}, + {expected: FileSystemPermissions{Delete: true}, input: "d"}, + {expected: FileSystemPermissions{List: true}, input: "l"}, + {expected: FileSystemPermissions{Move: true}, input: "m"}, + {expected: FileSystemPermissions{Execute: true}, input: "e"}, + {expected: FileSystemPermissions{ModifyOwnership: true}, input: "o"}, + {expected: FileSystemPermissions{ModifyPermissions: true}, input: "p"}, + {expected: FileSystemPermissions{ + 
Read: true, + Add: true, + Create: true, + Write: true, + Delete: true, + List: true, + Move: true, + Execute: true, + ModifyOwnership: true, + ModifyPermissions: true, + }, input: "racwdlmeop"}, + {expected: FileSystemPermissions{ + Read: true, + Add: true, + Create: true, + Write: true, + Delete: true, + List: true, + Move: true, + Execute: true, + ModifyOwnership: true, + ModifyPermissions: true, + }, input: "cpwmreodal"}, // Wrong order parses correctly + } + for _, c := range testdata { + permissions, err := parseFileSystemPermissions(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestFileSystemPermissions_ParseNegative(t *testing.T) { + _, err := parseFileSystemPermissions("cpwmreodalz") // Here 'z' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "122") +} + +func TestFilePermissions_String(t *testing.T) { + testdata := []struct { + input FilePermissions + expected string + }{ + {input: FilePermissions{Read: true}, expected: "r"}, + {input: FilePermissions{Add: true}, expected: "a"}, + {input: FilePermissions{Create: true}, expected: "c"}, + {input: FilePermissions{Write: true}, expected: "w"}, + {input: FilePermissions{Delete: true}, expected: "d"}, + {input: FilePermissions{List: true}, expected: "l"}, + {input: FilePermissions{Move: true}, expected: "m"}, + {input: FilePermissions{Execute: true}, expected: "e"}, + {input: FilePermissions{Ownership: true}, expected: "o"}, + {input: FilePermissions{Permissions: true}, expected: "p"}, + {input: FilePermissions{ + Read: true, + Add: true, + Create: true, + Write: true, + Delete: true, + List: true, + Move: true, + Execute: true, + Ownership: true, + Permissions: true, + }, expected: "racwdlmeop"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestFilePermissions_Parse(t *testing.T) { + testdata := []struct { + expected FilePermissions + input string + }{ + {expected: FilePermissions{Read: 
true}, input: "r"}, + {expected: FilePermissions{Add: true}, input: "a"}, + {expected: FilePermissions{Create: true}, input: "c"}, + {expected: FilePermissions{Write: true}, input: "w"}, + {expected: FilePermissions{Delete: true}, input: "d"}, + {expected: FilePermissions{List: true}, input: "l"}, + {expected: FilePermissions{Move: true}, input: "m"}, + {expected: FilePermissions{Execute: true}, input: "e"}, + {expected: FilePermissions{Ownership: true}, input: "o"}, + {expected: FilePermissions{Permissions: true}, input: "p"}, + {expected: FilePermissions{ + Read: true, + Add: true, + Create: true, + Write: true, + Delete: true, + List: true, + Move: true, + Execute: true, + Ownership: true, + Permissions: true, + }, input: "racwdlmeop"}, + {expected: FilePermissions{ + Read: true, + Add: true, + Create: true, + Write: true, + Delete: true, + List: true, + Move: true, + Execute: true, + Ownership: true, + Permissions: true, + }, input: "apwecrdlmo"}, // Wrong order parses correctly + } + for _, c := range testdata { + permissions, err := parsePathPermissions(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestDirectoryPermissions_String(t *testing.T) { + testdata := []struct { + input DirectoryPermissions + expected string + }{ + {input: DirectoryPermissions{Read: true}, expected: "r"}, + {input: DirectoryPermissions{Add: true}, expected: "a"}, + {input: DirectoryPermissions{Create: true}, expected: "c"}, + {input: DirectoryPermissions{Write: true}, expected: "w"}, + {input: DirectoryPermissions{Delete: true}, expected: "d"}, + {input: DirectoryPermissions{List: true}, expected: "l"}, + {input: DirectoryPermissions{Move: true}, expected: "m"}, + {input: DirectoryPermissions{Execute: true}, expected: "e"}, + {input: DirectoryPermissions{Ownership: true}, expected: "o"}, + {input: DirectoryPermissions{Permissions: true}, expected: "p"}, + {input: DirectoryPermissions{ + Read: true, + Add: true, + Create: true, + Write: true, 
+ Delete: true, + List: true, + Move: true, + Execute: true, + Ownership: true, + Permissions: true, + }, expected: "racwdlmeop"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestParsePermissionsNegative(t *testing.T) { + _, err := parsePathPermissions("awecrdlfmo") // Here 'f' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "102") +} + +func TestGetCanonicalName(t *testing.T) { + testdata := []struct { + inputAccount string + inputContainer string + inputBlob string + inputDirectory string + expected string + }{ + {inputAccount: "fakestorageaccount", inputContainer: "fakestoragecontainer", expected: "/blob/fakestorageaccount/fakestoragecontainer"}, + {inputAccount: "fakestorageaccount", inputContainer: "fakestoragecontainer", inputBlob: "fakestorageblob", expected: "/blob/fakestorageaccount/fakestoragecontainer/fakestorageblob"}, + {inputAccount: "fakestorageaccount", inputContainer: "fakestoragecontainer", inputBlob: "fakestoragevirtualdir/fakestorageblob", expected: "/blob/fakestorageaccount/fakestoragecontainer/fakestoragevirtualdir/fakestorageblob"}, + {inputAccount: "fakestorageaccount", inputContainer: "fakestoragecontainer", inputBlob: "fakestoragevirtualdir\\fakestorageblob", expected: "/blob/fakestorageaccount/fakestoragecontainer/fakestoragevirtualdir/fakestorageblob"}, + {inputAccount: "fakestorageaccount", inputContainer: "fakestoragecontainer", inputBlob: "fakestoragedirectory", expected: "/blob/fakestorageaccount/fakestoragecontainer/fakestoragedirectory"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, getCanonicalName(c.inputAccount, c.inputContainer, c.inputBlob, c.inputDirectory)) + } +} + +func TestGetDirectoryDepth(t *testing.T) { + testdata := []struct { + input string + expected string + }{ + {input: "", expected: ""}, + {input: "myfile", expected: "1"}, + {input: "mydirectory", expected: "1"}, + {input: "mydirectory/myfile", expected: "2"}, + 
{input: "mydirectory/mysubdirectory", expected: "2"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, getDirectoryDepth(c.input)) + } +} diff --git a/sdk/storage/azdatalake/sas/url_parts.go b/sdk/storage/azdatalake/sas/url_parts.go new file mode 100644 index 000000000000..bef6a601e451 --- /dev/null +++ b/sdk/storage/azdatalake/sas/url_parts.go @@ -0,0 +1,109 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" +) + +// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. +// Ex: "https://10.132.141.33/accountname/containername" +type IPEndpointStyleInfo struct { + AccountName string // "" if not using IP endpoint style +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts struct { + Scheme string // Ex: "https://" + Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80" + IPEndpointStyleInfo IPEndpointStyleInfo + FileSystemName string // "" if no container + PathName string // "" if no blob + SAS QueryParameters + UnparsedParams string +} + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. +// Any other query parameters remain in the UnparsedParams field. 
+func ParseURL(u string) (URLParts, error) { + uri, err := url.Parse(u) + if err != nil { + return URLParts{}, err + } + + up := URLParts{ + Scheme: uri.Scheme, + Host: uri.Host, + } + + // Find the container & blob names (if any) + if uri.Path != "" { + path := uri.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + if shared.IsIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob + up.IPEndpointStyleInfo.AccountName = path + path = "" // No ContainerName present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) + } + } + + filesystemEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) + if filesystemEndIndex == -1 { // Slash not found; path has container name & no blob name + up.FileSystemName = path + } else { + up.FileSystemName = path[:filesystemEndIndex] // The container name is the part between the slashes + up.PathName = path[filesystemEndIndex+1:] // The blob name is after the container slash + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := uri.Query() + up.SAS = newQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up, nil +} + +// String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. 
+func (up URLParts) String() string { + path := "" + if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + path += "/" + up.IPEndpointStyleInfo.AccountName + } + // Concatenate container & blob names (if they exist) + if up.FileSystemName != "" { + path += "/" + up.FileSystemName + if up.PathName != "" { + path += "/" + up.PathName + } + } + + rawQuery := up.UnparsedParams + sas := up.SAS.Encode() + if sas != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += sas + } + u := url.URL{ + Scheme: up.Scheme, + Host: up.Host, + Path: path, + RawQuery: rawQuery, + } + return u.String() +} diff --git a/sdk/storage/azdatalake/sas/url_parts_test.go b/sdk/storage/azdatalake/sas/url_parts_test.go new file mode 100644 index 000000000000..32fa9569d957 --- /dev/null +++ b/sdk/storage/azdatalake/sas/url_parts_test.go @@ -0,0 +1,73 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package sas + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseURLIPStyle(t *testing.T) { + urlWithIP := "https://127.0.0.1:5000/fakestorageaccount" + blobURLParts, err := ParseURL(urlWithIP) + require.NoError(t, err) + require.Equal(t, blobURLParts.Scheme, "https") + require.Equal(t, blobURLParts.Host, "127.0.0.1:5000") + require.Equal(t, blobURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount") + + urlWithIP = "https://127.0.0.1:5000/fakestorageaccount/fakecontainer" + blobURLParts, err = ParseURL(urlWithIP) + require.NoError(t, err) + require.Equal(t, blobURLParts.Scheme, "https") + require.Equal(t, blobURLParts.Host, "127.0.0.1:5000") + require.Equal(t, blobURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount") + require.Equal(t, blobURLParts.FileSystemName, "fakecontainer") + + urlWithIP = "https://127.0.0.1:5000/fakestorageaccount/fakecontainer/fakeblob" + blobURLParts, err = ParseURL(urlWithIP) + require.NoError(t, err) + require.Equal(t, blobURLParts.Scheme, "https") + require.Equal(t, blobURLParts.Host, "127.0.0.1:5000") + require.Equal(t, blobURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount") + require.Equal(t, blobURLParts.FileSystemName, "fakecontainer") + require.Equal(t, blobURLParts.PathName, "fakeblob") +} + +func TestParseURL(t *testing.T) { + testStorageAccount := "fakestorageaccount" + host := fmt.Sprintf("%s.blob.core.windows.net", testStorageAccount) + testContainer := "fakecontainer" + fileNames := []string{"/._.TESTT.txt", "/.gitignore/dummyfile1"} + + const sasStr = "?sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https,http&si=myIdentifier&ss=bf&srt=s&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D" + + for _, fileName := range fileNames { + urlWithVersion := fmt.Sprintf("https://%s.blob.core.windows.net/%s%s?%s", testStorageAccount, testContainer, fileName, sasStr) + blobURLParts, err := 
ParseURL(urlWithVersion) + require.NoError(t, err) + + require.Equal(t, blobURLParts.Scheme, "https") + require.Equal(t, blobURLParts.Host, host) + require.Equal(t, blobURLParts.FileSystemName, testContainer) + + validateSAS(t, sasStr, blobURLParts.SAS) + } + + for _, fileName := range fileNames { + urlWithVersion := fmt.Sprintf("https://%s.blob.core.windows.net/%s%s?%s", testStorageAccount, testContainer, fileName, sasStr) + blobURLParts, err := ParseURL(urlWithVersion) + require.NoError(t, err) + + require.Equal(t, blobURLParts.Scheme, "https") + require.Equal(t, blobURLParts.Host, host) + require.Equal(t, blobURLParts.FileSystemName, testContainer) + + validateSAS(t, sasStr, blobURLParts.SAS) + } +} diff --git a/sdk/storage/azdatalake/service/client.go b/sdk/storage/azdatalake/service/client.go new file mode 100644 index 000000000000..3a5f0c734ab5 --- /dev/null +++ b/sdk/storage/azdatalake/service/client.go @@ -0,0 +1,297 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+
+package service
+
+import (
+	"context"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// FOR SERVICE CLIENT WE STORE THE GENERATED BLOB LAYER IN ORDER TO USE FS LISTING AND THE TRANSFORMS IT HAS
+
+// ClientOptions contains the optional parameters when creating a Client.
+type ClientOptions base.ClientOptions
+
+// Client represents a URL to the Azure Datalake Storage service.
+type Client base.CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client]
+
+// NewClient creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g.
https://.dfs.core.windows.net/ +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + blobServiceURL, datalakeServiceURL := shared.GetURLs(serviceURL) + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobServiceClientOpts := service.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobSvcClient, _ := service.NewClient(blobServiceURL, cred, &blobServiceClientOpts) + svcClient := base.NewServiceClient(datalakeServiceURL, blobServiceURL, blobSvcClient, azClient, nil, &cred, (*base.ClientOptions)(conOptions)) + + return (*Client)(svcClient), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. https://.dfs.core.windows.net/ +// - options - client options; pass nil to accept the default values. 
+func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { + blobServiceURL, datalakeServiceURL := shared.GetURLs(serviceURL) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{} + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobServiceClientOpts := service.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobSvcClient, _ := service.NewClientWithNoCredential(blobServiceURL, &blobServiceClientOpts) + svcClient := base.NewServiceClient(datalakeServiceURL, blobServiceURL, blobSvcClient, azClient, nil, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(svcClient), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. 
https://.dfs.core.windows.net/ +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + blobServiceURL, datalakeServiceURL := shared.GetURLs(serviceURL) + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobServiceClientOpts := service.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobSharedKey, err := exported.ConvertToBlobSharedKey(cred) + if err != nil { + return nil, err + } + blobSvcClient, _ := service.NewClientWithSharedKeyCredential(blobServiceURL, blobSharedKey, &blobServiceClientOpts) + svcClient := base.NewServiceClient(datalakeServiceURL, blobServiceURL, blobSvcClient, azClient, cred, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(svcClient), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (s *Client) getClientOptions() *base.ClientOptions { + return base.GetCompositeClientOptions((*base.CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client])(s)) +} + +// NewFileSystemClient creates a new filesystem.Client object by concatenating filesystemName to the end of this Client's URL. +// The new filesystem.Client uses the same request policy pipeline as the Client. +func (s *Client) NewFileSystemClient(filesystemName string) *filesystem.Client { + filesystemURL := runtime.JoinPaths(s.generatedServiceClientWithDFS().Endpoint(), filesystemName) + containerURL, filesystemURL := shared.GetURLs(filesystemURL) + return (*filesystem.Client)(base.NewFileSystemClient(filesystemURL, containerURL, s.serviceClient().NewContainerClient(filesystemName), s.generatedServiceClientWithDFS().InternalClient().WithClientName(shared.FileSystemClient), s.sharedKey(), s.identityCredential(), s.getClientOptions())) +} + +// GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object. +// OAuth is required for this call, as well as any role that can delegate access to the storage account. 
+func (s *Client) GetUserDelegationCredential(ctx context.Context, info KeyInfo, o *GetUserDelegationCredentialOptions) (*UserDelegationCredential, error) { + url, err := azdatalake.ParseURL(s.BlobURL()) + if err != nil { + return nil, err + } + + getUserDelegationKeyOptions := o.format() + udk, err := s.generatedServiceClientWithBlob().GetUserDelegationKey(ctx, info, getUserDelegationKeyOptions) + if err != nil { + return nil, exported.ConvertToDFSError(err) + } + + return exported.NewUserDelegationCredential(strings.Split(url.Host, ".")[0], udk.UserDelegationKey), nil +} + +func (s *Client) generatedServiceClientWithDFS() *generated.ServiceClient { + svcClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client])(s)) + return svcClientWithDFS +} + +func (s *Client) generatedServiceClientWithBlob() *generated_blob.ServiceClient { + _, svcClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client])(s)) + return svcClientWithBlob +} + +func (s *Client) serviceClient() *service.Client { + _, _, serviceClient := base.InnerClients((*base.CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client])(s)) + return serviceClient +} + +func (s *Client) sharedKey() *exported.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client])(s)) +} + +func (s *Client) identityCredential() *azcore.TokenCredential { + return base.IdentityCredentialComposite((*base.CompositeClient[generated.ServiceClient, generated_blob.ServiceClient, service.Client])(s)) +} + +// DFSURL returns the URL endpoint used by the Client object. +func (s *Client) DFSURL() string { + return s.generatedServiceClientWithDFS().Endpoint() +} + +// BlobURL returns the URL endpoint used by the Client object. 
+func (s *Client) BlobURL() string {
+	return s.generatedServiceClientWithBlob().Endpoint()
+}
+
+// CreateFileSystem creates a new filesystem under the specified account.
+func (s *Client) CreateFileSystem(ctx context.Context, filesystem string, options *CreateFileSystemOptions) (CreateFileSystemResponse, error) {
+	filesystemClient := s.NewFileSystemClient(filesystem)
+	resp, err := filesystemClient.Create(ctx, options)
+	err = exported.ConvertToDFSError(err)
+	return resp, err
+}
+
+// DeleteFileSystem deletes the specified filesystem.
+func (s *Client) DeleteFileSystem(ctx context.Context, filesystem string, options *DeleteFileSystemOptions) (DeleteFileSystemResponse, error) {
+	filesystemClient := s.NewFileSystemClient(filesystem)
+	resp, err := filesystemClient.Delete(ctx, options)
+	err = exported.ConvertToDFSError(err)
+	return resp, err
+}
+
+// SetProperties sets properties for a storage account's Datalake service endpoint.
+func (s *Client) SetProperties(ctx context.Context, options *SetPropertiesOptions) (SetPropertiesResponse, error) {
+	opts := options.format()
+	resp, err := s.serviceClient().SetProperties(ctx, opts)
+	err = exported.ConvertToDFSError(err)
+	return resp, err
+}
+
+// GetProperties gets properties for a storage account's Datalake service endpoint.
+func (s *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) {
+	opts := options.format()
+	resp, err := s.serviceClient().GetProperties(ctx, opts)
+	err = exported.ConvertToDFSError(err)
+	return resp, err
+
+}
+
+// NewListFileSystemsPager operation returns a pager of the filesystems under the specified account.
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-shares +func (s *Client) NewListFileSystemsPager(o *ListFileSystemsOptions) *runtime.Pager[ListFileSystemsResponse] { + listOptions := generated_blob.ServiceClientListContainersSegmentOptions{} + defaultInclude := ListFileSystemsInclude{} + if o != nil { + if o.Include != defaultInclude && o.Include.Deleted != nil && *o.Include.Deleted { + listOptions.Include = append(listOptions.Include, generated_blob.ListContainersIncludeTypeDeleted) + } + if o.Include != defaultInclude && o.Include.Metadata != nil && *o.Include.Metadata { + listOptions.Include = append(listOptions.Include, generated_blob.ListContainersIncludeTypeMetadata) + } + if o.Include != defaultInclude && o.Include.System != nil && *o.Include.System { + listOptions.Include = append(listOptions.Include, generated_blob.ListContainersIncludeTypeSystem) + } + listOptions.Marker = o.Marker + listOptions.Maxresults = o.MaxResults + listOptions.Prefix = o.Prefix + } + return runtime.NewPager(runtime.PagingHandler[ListFileSystemsResponse]{ + More: func(page ListFileSystemsResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListFileSystemsResponse) (ListFileSystemsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = s.generatedServiceClientWithBlob().ListContainersSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = s.generatedServiceClientWithBlob().ListContainersSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListFileSystemsResponse{}, exported.ConvertToDFSError(err) + } + resp, err := s.generatedServiceClientWithBlob().InternalClient().Pipeline().Do(req) + if err != nil { + return ListFileSystemsResponse{}, exported.ConvertToDFSError(err) + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListFileSystemsResponse{}, 
exported.ConvertToDFSError(runtime.NewResponseError(resp)) + } + resp1, err := s.generatedServiceClientWithBlob().ListContainersSegmentHandleResponse(resp) + return resp1, exported.ConvertToDFSError(err) + }, + }) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + // format all options to blob service options + res, perms, opts := o.format(resources, permissions) + resp, err := s.serviceClient().GetSASURL(res, perms, expiry, opts) + err = exported.ConvertToDFSError(err) + return resp, err +} diff --git a/sdk/storage/azdatalake/service/client_test.go b/sdk/storage/azdatalake/service/client_test.go new file mode 100644 index 000000000000..286bc62a4cef --- /dev/null +++ b/sdk/storage/azdatalake/service/client_test.go @@ -0,0 +1,772 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "os" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running service Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &ServiceRecordedTestsSuite{}) + suite.Run(t, &ServiceUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &ServiceRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &ServiceRecordedTestsSuite{}) + } +} + +func (s *ServiceRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *ServiceRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *ServiceUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (s *ServiceUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type ServiceRecordedTestsSuite struct { + suite.Suite +} + +type ServiceUnrecordedTestsSuite struct { + suite.Suite +} + +func (s *ServiceUnrecordedTestsSuite) TestServiceClientFromConnectionString() { + _require := require.New(s.T()) + testName := s.T().Name() + + accountName, _ := 
testcommon.GetGenericAccountInfo(testcommon.TestAccountDatalake) + connectionString, _ := testcommon.GetGenericConnectionString(testcommon.TestAccountDatalake) + + parsedConnStr, err := shared.ParseConnectionString(*connectionString) + _require.Nil(err) + _require.Equal(parsedConnStr.ServiceURL, "https://"+accountName+".blob.core.windows.net/") + + sharedKeyCred, err := azdatalake.NewSharedKeyCredential(parsedConnStr.AccountName, parsedConnStr.AccountKey) + _require.Nil(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(parsedConnStr.ServiceURL, sharedKeyCred, nil) + _require.Nil(err) + fsClient := testcommon.CreateNewFileSystem(context.Background(), _require, testcommon.GenerateFileSystemName(testName), svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) +} + +func (s *ServiceRecordedTestsSuite) TestSetPropertiesLogging() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + days := to.Ptr[int32](5) + enabled := to.Ptr(true) + + loggingOpts := service.Logging{ + Read: enabled, Write: enabled, Delete: enabled, + RetentionPolicy: &service.RetentionPolicy{Enabled: enabled, Days: days}} + opts := service.SetPropertiesOptions{Logging: &loggingOpts} + _, err = svcClient.SetProperties(context.Background(), &opts) + + _require.Nil(err) + resp1, err := svcClient.GetProperties(context.Background(), nil) + + _require.Nil(err) + _require.Equal(resp1.Logging.Write, enabled) + _require.Equal(resp1.Logging.Read, enabled) + _require.Equal(resp1.Logging.Delete, enabled) + _require.Equal(resp1.Logging.RetentionPolicy.Days, days) + _require.Equal(resp1.Logging.RetentionPolicy.Enabled, enabled) +} + +func (s *ServiceRecordedTestsSuite) TestSetPropertiesHourMetrics() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + days := 
to.Ptr[int32](5) + enabled := to.Ptr(true) + + metricsOpts := service.Metrics{ + Enabled: enabled, IncludeAPIs: enabled, RetentionPolicy: &service.RetentionPolicy{Enabled: enabled, Days: days}} + opts := service.SetPropertiesOptions{HourMetrics: &metricsOpts} + _, err = svcClient.SetProperties(context.Background(), &opts) + + _require.Nil(err) + resp1, err := svcClient.GetProperties(context.Background(), nil) + + _require.Nil(err) + _require.Equal(resp1.HourMetrics.Enabled, enabled) + _require.Equal(resp1.HourMetrics.IncludeAPIs, enabled) + _require.Equal(resp1.HourMetrics.RetentionPolicy.Days, days) + _require.Equal(resp1.HourMetrics.RetentionPolicy.Enabled, enabled) +} + +func (s *ServiceRecordedTestsSuite) TestSetPropertiesMinuteMetrics() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + days := to.Ptr[int32](5) + enabled := to.Ptr(true) + + metricsOpts := service.Metrics{ + Enabled: enabled, IncludeAPIs: enabled, RetentionPolicy: &service.RetentionPolicy{Enabled: enabled, Days: days}} + opts := service.SetPropertiesOptions{MinuteMetrics: &metricsOpts} + _, err = svcClient.SetProperties(context.Background(), &opts) + + _require.Nil(err) + resp1, err := svcClient.GetProperties(context.Background(), nil) + + _require.Nil(err) + _require.Equal(resp1.MinuteMetrics.Enabled, enabled) + _require.Equal(resp1.MinuteMetrics.IncludeAPIs, enabled) + _require.Equal(resp1.MinuteMetrics.RetentionPolicy.Days, days) + _require.Equal(resp1.MinuteMetrics.RetentionPolicy.Enabled, enabled) +} + +func (s *ServiceRecordedTestsSuite) TestSetPropertiesSetCORSMultiple() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + defaultAge := to.Ptr[int32](500) + defaultStr := to.Ptr[string]("") + + allowedOrigins1 := "www.xyz.com" + allowedMethods1 := "GET" + CORSOpts1 := 
&service.CORSRule{AllowedOrigins: &allowedOrigins1, AllowedMethods: &allowedMethods1} + + allowedOrigins2 := "www.xyz.com,www.ab.com,www.bc.com" + allowedMethods2 := "GET, PUT" + maxAge2 := to.Ptr[int32](500) + exposedHeaders2 := "x-ms-meta-data*,x-ms-meta-source*,x-ms-meta-abc,x-ms-meta-bcd" + allowedHeaders2 := "x-ms-meta-data*,x-ms-meta-target*,x-ms-meta-xyz,x-ms-meta-foo" + + CORSOpts2 := &service.CORSRule{ + AllowedOrigins: &allowedOrigins2, AllowedMethods: &allowedMethods2, + MaxAgeInSeconds: maxAge2, ExposedHeaders: &exposedHeaders2, AllowedHeaders: &allowedHeaders2} + + CORSRules := []*service.CORSRule{CORSOpts1, CORSOpts2} + + opts := service.SetPropertiesOptions{CORS: CORSRules} + _, err = svcClient.SetProperties(context.Background(), &opts) + + _require.Nil(err) + resp, err := svcClient.GetProperties(context.Background(), nil) + _require.NoError(err) + for i := 0; i < len(resp.CORS); i++ { + if resp.CORS[i].AllowedOrigins == &allowedOrigins1 { + _require.Equal(resp.CORS[i].AllowedMethods, &allowedMethods1) + _require.Equal(resp.CORS[i].MaxAgeInSeconds, defaultAge) + _require.Equal(resp.CORS[i].ExposedHeaders, defaultStr) + _require.Equal(resp.CORS[i].AllowedHeaders, defaultStr) + + } else if resp.CORS[i].AllowedOrigins == &allowedOrigins2 { + _require.Equal(resp.CORS[i].AllowedMethods, &allowedMethods2) + _require.Equal(resp.CORS[i].MaxAgeInSeconds, &maxAge2) + _require.Equal(resp.CORS[i].ExposedHeaders, &exposedHeaders2) + _require.Equal(resp.CORS[i].AllowedHeaders, &allowedHeaders2) + } + } +} + +func (s *ServiceRecordedTestsSuite) TestAccountDeleteRetentionPolicy() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + days := to.Ptr[int32](5) + enabled := to.Ptr(true) + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{DeleteRetentionPolicy: &service.RetentionPolicy{Enabled: enabled, Days: days}}) + _require.Nil(err) 
+ + // From FE, 30 seconds is guaranteed to be enough. + time.Sleep(time.Second * 30) + + resp, err := svcClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Enabled, *enabled) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Days, *days) + + disabled := false + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{DeleteRetentionPolicy: &service.RetentionPolicy{Enabled: &disabled}}) + _require.Nil(err) + + // From FE, 30 seconds is guaranteed to be enough. + time.Sleep(time.Second * 30) + + resp, err = svcClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Enabled, false) + _require.Nil(resp.StorageServiceProperties.DeleteRetentionPolicy.Days) +} + +func (s *ServiceRecordedTestsSuite) TestAccountDeleteRetentionPolicyEmpty() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + days := to.Ptr[int32](5) + enabled := to.Ptr(true) + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{DeleteRetentionPolicy: &service.RetentionPolicy{Enabled: enabled, Days: days}}) + _require.Nil(err) + + // From FE, 30 seconds is guaranteed to be enough. 
+ time.Sleep(time.Second * 30) + + resp, err := svcClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Enabled, *enabled) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Days, *days) + + // Empty retention policy causes an error, this is different from track 1.5 + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{DeleteRetentionPolicy: &service.RetentionPolicy{}}) + _require.NotNil(err) +} + +func (s *ServiceRecordedTestsSuite) TestAccountDeleteRetentionPolicyNil() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + days := to.Ptr[int32](5) + enabled := to.Ptr(true) + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{DeleteRetentionPolicy: &service.RetentionPolicy{Enabled: enabled, Days: days}}) + _require.Nil(err) + + // From FE, 30 seconds is guaranteed to be enough. + time.Sleep(time.Second * 30) + + resp, err := svcClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Enabled, *enabled) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Days, *days) + + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{}) + _require.Nil(err) + + // From FE, 30 seconds is guaranteed to be enough. + time.Sleep(time.Second * 30) + + // If an element of service properties is not passed, the service keeps the current settings. 
+ resp, err = svcClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Enabled, *enabled) + _require.EqualValues(*resp.StorageServiceProperties.DeleteRetentionPolicy.Days, *days) + + // Disable for other tests + enabled = to.Ptr(false) + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{DeleteRetentionPolicy: &service.RetentionPolicy{Enabled: enabled}}) + _require.Nil(err) +} + +func (s *ServiceRecordedTestsSuite) TestAccountDeleteRetentionPolicyDaysTooLarge() { + _require := require.New(s.T()) + var svcClient *service.Client + var err error + for i := 1; i <= 2; i++ { + if i == 1 { + svcClient, err = testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + } else { + svcClient, err = testcommon.GetServiceClientFromConnectionString(s.T(), testcommon.TestAccountDatalake, nil) + } + _require.Nil(err) + + days := int32(366) // Max days is 365. Left to the service for validation. + enabled := true + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{DeleteRetentionPolicy: &service.RetentionPolicy{Enabled: &enabled, Days: &days}}) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.InvalidXMLDocument) + } +} + +func (s *ServiceRecordedTestsSuite) TestAccountDeleteRetentionPolicyDaysOmitted() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + // Days is required if enabled is true. 
+ enabled := true + _, err = svcClient.SetProperties(context.Background(), &service.SetPropertiesOptions{DeleteRetentionPolicy: &service.RetentionPolicy{Enabled: &enabled}}) + _require.NotNil(err) + + testcommon.ValidateErrorCode(_require, err, datalakeerror.InvalidXMLDocument) +} + +func (s *ServiceUnrecordedTestsSuite) TestSASServiceClient() { + _require := require.New(s.T()) + testName := s.T().Name() + cred, _ := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDatalake) + + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.dfs.core.windows.net/", cred.AccountName()), cred, nil) + _require.Nil(err) + + fsName := testcommon.GenerateFileSystemName(testName) + + // Note: Always set all permissions, services, types to true to ensure order of string formed is correct. + resources := sas.AccountResourceTypes{ + Object: true, + Service: true, + Container: true, + } + permissions := sas.AccountPermissions{ + Read: true, + Write: true, + Delete: true, + DeletePreviousVersion: true, + List: true, + Add: true, + Create: true, + Update: true, + Process: true, + Tag: true, + FilterByTags: true, + PermanentDelete: true, + } + expiry := time.Now().Add(time.Hour) + sasUrl, err := serviceClient.GetSASURL(resources, permissions, expiry, nil) + _require.Nil(err) + + svcClient, err := testcommon.GetServiceClientNoCredential(s.T(), sasUrl, nil) + _require.Nil(err) + + // create fs using SAS + _, err = svcClient.CreateFileSystem(context.Background(), fsName, nil) + _require.Nil(err) + + _, err = svcClient.DeleteFileSystem(context.Background(), fsName, nil) + _require.Nil(err) +} + +func (s *ServiceRecordedTestsSuite) TestSASServiceClientNoKey() { + _require := require.New(s.T()) + accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME") + + serviceClient, err := service.NewClientWithNoCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), nil) + _require.Nil(err) + resources := sas.AccountResourceTypes{ + Object: 
true, + Service: true, + Container: true, + } + permissions := sas.AccountPermissions{ + Read: true, + Write: true, + Delete: true, + DeletePreviousVersion: true, + List: true, + Add: true, + Create: true, + Update: true, + Process: true, + Tag: true, + FilterByTags: true, + PermanentDelete: true, + } + + expiry := time.Now().Add(time.Hour) + _, err = serviceClient.GetSASURL(resources, permissions, expiry, nil) + _require.Equal(err.Error(), "SAS can only be signed with a SharedKeyCredential") +} + +func (s *ServiceUnrecordedTestsSuite) TestSASServiceClientSignNegative() { + _require := require.New(s.T()) + accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME") + accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + _require.Nil(err) + + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + _require.Nil(err) + resources := sas.AccountResourceTypes{ + Object: true, + Service: true, + Container: true, + } + permissions := sas.AccountPermissions{ + Read: true, + Write: true, + Delete: true, + DeletePreviousVersion: true, + List: true, + Add: true, + Create: true, + Update: true, + Process: true, + Tag: true, + FilterByTags: true, + PermanentDelete: true, + } + expiry := time.Time{} + _, err = serviceClient.GetSASURL(resources, permissions, expiry, nil) + _require.Equal(err.Error(), "account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") +} + +func (s *ServiceUnrecordedTestsSuite) TestNoSharedKeyCredError() { + _require := require.New(s.T()) + accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME") + + // Creating service client without credentials + serviceClient, err := service.NewClientWithNoCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), nil) + _require.Nil(err) + + // Adding SAS and options + resources := sas.AccountResourceTypes{ + Object: 
true, + Service: true, + Container: true, + } + permissions := sas.AccountPermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Update: true, + Delete: true, + } + start := time.Now().Add(-time.Hour) + expiry := start.Add(time.Hour) + opts := service.GetSASURLOptions{StartTime: &start} + + // GetSASURL fails (with MissingSharedKeyCredential) because service client is created without credentials + _, err = serviceClient.GetSASURL(resources, permissions, expiry, &opts) + _require.Equal(err, datalakeerror.MissingSharedKeyCredential) + +} + +func (s *ServiceUnrecordedTestsSuite) TestGetFileSystemClient() { + _require := require.New(s.T()) + testName := s.T().Name() + accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME") + accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + _require.Nil(err) + + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + _require.Nil(err) + + fsName := testcommon.GenerateFileSystemName(testName + "1") + fsClient := serviceClient.NewFileSystemClient(fsName) + + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + _, err = fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) +} + +func (s *ServiceUnrecordedTestsSuite) TestSASFileSystemClient() { + _require := require.New(s.T()) + testName := s.T().Name() + accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME") + accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + _require.Nil(err) + + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + _require.Nil(err) + + fsName := testcommon.GenerateFileSystemName(testName) + fsClient := 
serviceClient.NewFileSystemClient(fsName) + + permissions := sas.FileSystemPermissions{ + Read: true, + Add: true, + } + start := time.Now().Add(-5 * time.Minute).UTC() + expiry := time.Now().Add(time.Hour) + + opts := filesystem.GetSASURLOptions{StartTime: &start} + sasUrl, err := fsClient.GetSASURL(permissions, expiry, &opts) + _require.Nil(err) + + fsClient2, err := filesystem.NewClientWithNoCredential(sasUrl, nil) + _require.Nil(err) + + _, err = fsClient2.Create(context.Background(), &filesystem.CreateOptions{Metadata: testcommon.BasicMetadata}) + _require.NotNil(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.AuthorizationFailure) +} + +func (s *ServiceUnrecordedTestsSuite) TestSASFileSystem2() { + _require := require.New(s.T()) + testName := s.T().Name() + accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME") + accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + _require.Nil(err) + + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + _require.Nil(err) + + fsName := testcommon.GenerateFileSystemName(testName) + fsClient := serviceClient.NewFileSystemClient(fsName) + start := time.Now().Add(-5 * time.Minute).UTC() + opts := filesystem.GetSASURLOptions{StartTime: &start} + + sasUrlReadAdd, err := fsClient.GetSASURL(sas.FileSystemPermissions{Read: true, Add: true}, time.Now().Add(time.Hour), &opts) + _require.Nil(err) + _, err = fsClient.Create(context.Background(), &filesystem.CreateOptions{Metadata: testcommon.BasicMetadata}) + _require.Nil(err) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient) + + fsClient1, err := filesystem.NewClientWithNoCredential(sasUrlReadAdd, nil) + _require.Nil(err) + + // filesystem metadata and properties can't be read or written with SAS auth + _, err = fsClient1.GetProperties(context.Background(), nil) + _require.Error(err) 
+ testcommon.ValidateErrorCode(_require, err, datalakeerror.AuthorizationFailure) + + start = time.Now().Add(-5 * time.Minute).UTC() + opts = filesystem.GetSASURLOptions{StartTime: &start} + + sasUrlRCWL, err := fsClient.GetSASURL(sas.FileSystemPermissions{Add: true, Create: true, Delete: true, List: true}, time.Now().Add(time.Hour), &opts) + _require.Nil(err) + + fsClient2, err := filesystem.NewClientWithNoCredential(sasUrlRCWL, nil) + _require.Nil(err) + + // filesystems can't be created, deleted, or listed with SAS auth + _, err = fsClient2.Create(context.Background(), nil) + _require.Error(err) + testcommon.ValidateErrorCode(_require, err, datalakeerror.AuthorizationFailure) +} + +func (s *ServiceRecordedTestsSuite) TestListFilesystemsBasic() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.Nil(err) + md := map[string]*string{ + "foo": to.Ptr("foovalue"), + "bar": to.Ptr("barvalue"), + } + + fsName := testcommon.GenerateFileSystemName(testName) + fsClient := svcClient.NewFileSystemClient(fsName) + _, err = fsClient.Create(context.Background(), &filesystem.CreateOptions{Metadata: md}) + defer func(fsClient *filesystem.Client, ctx context.Context, options *filesystem.DeleteOptions) { + _, err := fsClient.Delete(ctx, options) + if err != nil { + _require.Nil(err) + } + }(fsClient, context.Background(), nil) + _require.Nil(err) + prefix := testcommon.FileSystemPrefix + listOptions := service.ListFileSystemsOptions{Prefix: &prefix, Include: service.ListFileSystemsInclude{Metadata: to.Ptr(true)}} + pager := svcClient.NewListFileSystemsPager(&listOptions) + + count := 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + for _, ctnr := range resp.FileSystemItems { + _require.NotNil(ctnr.Name) + + if *ctnr.Name == fsName { + _require.NotNil(ctnr.Properties) + _require.NotNil(ctnr.Properties.LastModified) + 
_require.NotNil(ctnr.Properties.ETag) + _require.Equal(*ctnr.Properties.LeaseStatus, service.StatusTypeUnlocked) + _require.Equal(*ctnr.Properties.LeaseState, service.StateTypeAvailable) + _require.Nil(ctnr.Properties.LeaseDuration) + _require.Nil(ctnr.Properties.PublicAccess) + _require.NotNil(ctnr.Metadata) + + unwrappedMeta := map[string]*string{} + for k, v := range ctnr.Metadata { + if v != nil { + unwrappedMeta[k] = v + } + } + + _require.EqualValues(unwrappedMeta, md) + } + } + if err != nil { + break + } + } + + _require.Nil(err) + _require.GreaterOrEqual(count, 0) +} + +func (s *ServiceRecordedTestsSuite) TestListFilesystemsBasicUsingConnectionString() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClientFromConnectionString(s.T(), testcommon.TestAccountDefault, nil) + _require.Nil(err) + md := map[string]*string{ + "foo": to.Ptr("foovalue"), + "bar": to.Ptr("barvalue"), + } + + fsName := testcommon.GenerateFileSystemName(testName) + fsClient := testcommon.ServiceGetFileSystemClient(fsName, svcClient) + _, err = fsClient.Create(context.Background(), &filesystem.CreateOptions{Metadata: md}) + defer func(fsClient *filesystem.Client, ctx context.Context, options *filesystem.DeleteOptions) { + _, err := fsClient.Delete(ctx, options) + if err != nil { + _require.Nil(err) + } + }(fsClient, context.Background(), nil) + _require.Nil(err) + prefix := testcommon.FileSystemPrefix + listOptions := service.ListFileSystemsOptions{Prefix: &prefix, Include: service.ListFileSystemsInclude{Metadata: to.Ptr(true)}} + pager := svcClient.NewListFileSystemsPager(&listOptions) + + count := 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + + for _, ctnr := range resp.FileSystemItems { + _require.NotNil(ctnr.Name) + + if *ctnr.Name == fsName { + _require.NotNil(ctnr.Properties) + _require.NotNil(ctnr.Properties.LastModified) + _require.NotNil(ctnr.Properties.ETag) + 
_require.Equal(*ctnr.Properties.LeaseStatus, service.StatusTypeUnlocked) + _require.Equal(*ctnr.Properties.LeaseState, service.StateTypeAvailable) + _require.Nil(ctnr.Properties.LeaseDuration) + _require.Nil(ctnr.Properties.PublicAccess) + _require.NotNil(ctnr.Metadata) + + unwrappedMeta := map[string]*string{} + for k, v := range ctnr.Metadata { + if v != nil { + unwrappedMeta[k] = v + } + } + + _require.EqualValues(unwrappedMeta, md) + } + } + if err != nil { + break + } + } + + _require.Nil(err) + _require.GreaterOrEqual(count, 0) +} + +func (s *ServiceRecordedTestsSuite) TestListFilesystemsPaged() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.Nil(err) + const numFileSystems = 6 + maxResults := int32(2) + const pagedFileSystemsPrefix = "azfilesystempaged" + + filesystems := make([]*filesystem.Client, numFileSystems) + expectedResults := make(map[string]bool) + for i := 0; i < numFileSystems; i++ { + fsName := pagedFileSystemsPrefix + testcommon.GenerateFileSystemName(testName) + fmt.Sprintf("%d", i) + fsClient := testcommon.CreateNewFileSystem(context.Background(), _require, fsName, svcClient) + filesystems[i] = fsClient + expectedResults[fsName] = false + } + + defer func() { + for i := range filesystems { + testcommon.DeleteFileSystem(context.Background(), _require, filesystems[i]) + } + }() + + prefix := pagedFileSystemsPrefix + testcommon.FileSystemPrefix + listOptions := service.ListFileSystemsOptions{MaxResults: &maxResults, Prefix: &prefix, Include: service.ListFileSystemsInclude{Metadata: to.Ptr(true)}} + count := 0 + results := make([]service.FileSystemItem, 0) + pager := svcClient.NewListFileSystemsPager(&listOptions) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + for _, ctnr := range resp.FileSystemItems { + _require.NotNil(ctnr.Name) + results = append(results, *ctnr) + count += 1 + } 
+ } + + _require.Equal(count, numFileSystems) + _require.Equal(len(results), numFileSystems) + + // make sure each fs we see is expected + for _, ctnr := range results { + _, ok := expectedResults[*ctnr.Name] + _require.Equal(ok, true) + expectedResults[*ctnr.Name] = true + } + + // make sure every expected fs was seen + for _, seen := range expectedResults { + _require.Equal(seen, true) + } + +} + +func (s *ServiceRecordedTestsSuite) TestAccountListFilesystemsEmptyPrefix() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + fsClient1 := testcommon.CreateNewFileSystem(context.Background(), _require, testcommon.GenerateFileSystemName(testName)+"1", svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient1) + fsClient2 := testcommon.CreateNewFileSystem(context.Background(), _require, testcommon.GenerateFileSystemName(testName)+"2", svcClient) + defer testcommon.DeleteFileSystem(context.Background(), _require, fsClient2) + + count := 0 + pager := svcClient.NewListFileSystemsPager(nil) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.Nil(err) + + for _, container := range resp.FileSystemItems { + count++ + _require.NotNil(container.Name) + } + if err != nil { + break + } + } + _require.GreaterOrEqual(count, 2) +} diff --git a/sdk/storage/azdatalake/service/constants.go b/sdk/storage/azdatalake/service/constants.go new file mode 100644 index 000000000000..a06f2a41a79a --- /dev/null +++ b/sdk/storage/azdatalake/service/constants.go @@ -0,0 +1,57 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" +) + +// PublicAccessType defines values for AccessType - private (default) or file or filesystem. +type PublicAccessType = filesystem.PublicAccessType + +const ( + File PublicAccessType = filesystem.File + FileSystem PublicAccessType = filesystem.FileSystem +) + +// StatusType defines values for StatusType +type StatusType = generated_blob.LeaseStatusType + +const ( + StatusTypeLocked StatusType = generated_blob.LeaseStatusTypeLocked + StatusTypeUnlocked StatusType = generated_blob.LeaseStatusTypeUnlocked +) + +// PossibleStatusTypeValues returns the possible values for the StatusType const type. +func PossibleStatusTypeValues() []StatusType { + return generated_blob.PossibleLeaseStatusTypeValues() +} + +// DurationType defines values for DurationType +type DurationType = generated_blob.LeaseDurationType + +const ( + DurationTypeInfinite DurationType = generated_blob.LeaseDurationTypeInfinite + DurationTypeFixed DurationType = generated_blob.LeaseDurationTypeFixed +) + +// PossibleDurationTypeValues returns the possible values for the DurationType const type. 
+func PossibleDurationTypeValues() []DurationType { + return generated_blob.PossibleLeaseDurationTypeValues() +} + +// StateType defines values for StateType +type StateType = generated_blob.LeaseStateType + +const ( + StateTypeAvailable StateType = generated_blob.LeaseStateTypeAvailable + StateTypeLeased StateType = generated_blob.LeaseStateTypeLeased + StateTypeExpired StateType = generated_blob.LeaseStateTypeExpired + StateTypeBreaking StateType = generated_blob.LeaseStateTypeBreaking + StateTypeBroken StateType = generated_blob.LeaseStateTypeBroken +) diff --git a/sdk/storage/azdatalake/service/examples_test.go b/sdk/storage/azdatalake/service/examples_test.go new file mode 100644 index 000000000000..525c86d582d1 --- /dev/null +++ b/sdk/storage/azdatalake/service/examples_test.go @@ -0,0 +1,335 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "log" + "os" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +func Example_service_Client_NewClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + serviceClient, err := service.NewClient(serviceURL, cred, nil) + handleError(err) + fmt.Println(serviceClient.DFSURL()) + fmt.Println(serviceClient.BlobURL()) +} + +func Example_service_Client_NewClientWithSharedKeyCredential() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + + cred, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + serviceClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + fmt.Println(serviceClient.DFSURL()) + fmt.Println(serviceClient.BlobURL()) +} + +func Example_service_Client_NewClientWithNoCredential() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + 
sharedAccessSignature, ok := os.LookupEnv("AZURE_STORAGE_SHARED_ACCESS_SIGNATURE") + if !ok { + panic("AZURE_STORAGE_SHARED_ACCESS_SIGNATURE could not be found") + } + serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/?%s", accountName, sharedAccessSignature) + + serviceClient, err := service.NewClientWithNoCredential(serviceURL, nil) + handleError(err) + fmt.Println(serviceClient.DFSURL()) + fmt.Println(serviceClient.BlobURL()) +} + +func Example_service_Client_NewClientFromConnectionString() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + + serviceClient, err := service.NewClientFromConnectionString(connectionString, nil) + handleError(err) + fmt.Println(serviceClient.DFSURL()) + fmt.Println(serviceClient.BlobURL()) +} + +func Example_service_Client_CreateContainer() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + + cred, err := azidentity.NewDefaultAzureCredential(nil) + handleError(err) + + serviceClient, err := service.NewClient(serviceURL, cred, nil) + handleError(err) + + _, err = serviceClient.CreateFileSystem(context.TODO(), "testfs", nil) + handleError(err) + + // ======== 2. 
Delete a filesystem ======== +	defer func(serviceClient1 *service.Client, ctx context.Context, fsName string, options *filesystem.DeleteOptions) { +		_, err = serviceClient1.DeleteFileSystem(ctx, fsName, options) +		if err != nil { +			log.Fatal(err) +		} +	}(serviceClient, context.TODO(), "testfs", nil) +} + +func Example_service_Client_DeleteFileSystem() { +	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") +	if !ok { +		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") +	} +	serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + +	cred, err := azidentity.NewDefaultAzureCredential(nil) +	handleError(err) +	serviceClient, err := service.NewClient(serviceURL, cred, nil) +	handleError(err) + +	_, err = serviceClient.DeleteFileSystem(context.TODO(), "testfs", nil) +	handleError(err) +} + +func Example_service_Client_ListFileSystems() { +	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") +	if !ok { +		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") +	} +	serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + +	cred, err := azidentity.NewDefaultAzureCredential(nil) +	handleError(err) +	serviceClient, err := service.NewClient(serviceURL, cred, nil) +	handleError(err) + +	listFSOptions := service.ListFileSystemsOptions{ +		Include: service.ListFileSystemsInclude{ +			Metadata: to.Ptr(true), // Include Metadata +			Deleted:  to.Ptr(true), // Include deleted filesystems in the result as well +		}, +	} +	pager := serviceClient.NewListFileSystemsPager(&listFSOptions) + +	for pager.More() { +		resp, err := pager.NextPage(context.TODO()) +		if err != nil { +			log.Fatal(err) +		} +		for _, fs := range resp.FileSystemItems { +			fmt.Println(*fs.Name) +		} +	} +} + +func Example_service_Client_GetSASURL() { +	cred, err := azdatalake.NewSharedKeyCredential("myAccountName", "myAccountKey") +	handleError(err) +	serviceClient, err := service.NewClientWithSharedKeyCredential("https://<myAccountName>.dfs.core.windows.net", cred, nil) + 
handleError(err) + +	resources := sas.AccountResourceTypes{Service: true} +	permission := sas.AccountPermissions{Read: true} +	start := time.Now() +	expiry := start.AddDate(1, 0, 0) +	options := service.GetSASURLOptions{StartTime: &start} +	sasURL, err := serviceClient.GetSASURL(resources, permission, expiry, &options) +	handleError(err) + +	serviceURL := fmt.Sprintf("https://<myAccountName>.dfs.core.windows.net/?%s", sasURL) +	serviceClientWithSAS, err := service.NewClientWithNoCredential(serviceURL, nil) +	handleError(err) +	_ = serviceClientWithSAS +} + +func Example_service_Client_SetProperties() { +	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") +	if !ok { +		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") +	} +	serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + +	cred, err := azidentity.NewDefaultAzureCredential(nil) +	handleError(err) +	serviceClient, err := service.NewClient(serviceURL, cred, nil) +	handleError(err) + +	enabled := true  // enabling retention period +	days := int32(5) // setting retention period to 5 days +	serviceSetPropertiesResponse, err := serviceClient.SetProperties(context.TODO(), &service.SetPropertiesOptions{ +		DeleteRetentionPolicy: &service.RetentionPolicy{Enabled: &enabled, Days: &days}, +	}) + +	handleError(err) +	fmt.Println(serviceSetPropertiesResponse) +} + +func Example_service_Client_GetProperties() { +	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") +	if !ok { +		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") +	} +	serviceURL := fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName) + +	cred, err := azidentity.NewDefaultAzureCredential(nil) +	handleError(err) + +	serviceClient, err := service.NewClient(serviceURL, cred, nil) +	handleError(err) + +	serviceGetPropertiesResponse, err := serviceClient.GetProperties(context.TODO(), nil) +	handleError(err) + +	fmt.Println(serviceGetPropertiesResponse) +} + +// This example shows how to create and use an Azure Storage 
account Shared Access Signature (SAS). +func Example_service_SASSignatureValues_Sign() { +	accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), os.Getenv("AZURE_STORAGE_ACCOUNT_KEY") + +	credential, err := azdatalake.NewSharedKeyCredential(accountName, accountKey) +	handleError(err) + +	sasQueryParams, err := sas.AccountSignatureValues{ +		Protocol:      sas.ProtocolHTTPS, +		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour), +		Permissions:   to.Ptr(sas.AccountPermissions{Read: true, List: true}).String(), +		ResourceTypes: to.Ptr(sas.AccountResourceTypes{Container: true, Object: true}).String(), +	}.SignWithSharedKey(credential) +	handleError(err) + +	sasURL := fmt.Sprintf("https://%s.dfs.core.windows.net/?%s", accountName, sasQueryParams.Encode()) + +	// This URL can be used to authenticate requests now +	serviceClient, err := service.NewClientWithNoCredential(sasURL, nil) +	handleError(err) + +	// You can also break a blob URL up into its constituent parts +	blobURLParts, _ := azdatalake.ParseURL(serviceClient.DFSURL()) +	fmt.Printf("SAS expiry time = %s\n", blobURLParts.SAS.ExpiryTime()) +} + +func Example_service_Client_NewClientWithUserDelegationCredential() { +	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") +	if !ok { +		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") +	} +	const containerName = "testContainer" + +	// Create Managed Identity (OAuth) Credentials using Client ID +	clientOptions := azcore.ClientOptions{} // Fill clientOptions as needed +	optsClientID := azidentity.ManagedIdentityCredentialOptions{ClientOptions: clientOptions, ID: azidentity.ClientID("7cf7db0d-...")} +	cred, err := azidentity.NewManagedIdentityCredential(&optsClientID) +	handleError(err) +	clientOptionsService := service.ClientOptions{} // Same as azcore.ClientOptions using service instead + +	svcClient, err := service.NewClient(fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName), cred, &clientOptionsService) +	handleError(err) + +	// Set 
the start and expiry times and create the user delegation key +	currentTime := time.Now().UTC().Add(-10 * time.Second) +	pastTime := currentTime.Add(48 * time.Hour) +	info := service.KeyInfo{ +		Start:  to.Ptr(currentTime.UTC().Format(sas.TimeFormat)), +		Expiry: to.Ptr(pastTime.UTC().Format(sas.TimeFormat)), +	} + +	udc, err := svcClient.GetUserDelegationCredential(context.Background(), info, nil) +	handleError(err) + +	fmt.Println("User Delegation Key has been created for ", accountName) + +	// Create Blob Signature Values with desired permissions and sign with user delegation credential +	sasQueryParams, err := sas.DatalakeSignatureValues{ +		Protocol:       sas.ProtocolHTTPS, +		StartTime:      time.Now().UTC().Add(time.Second * -10), +		ExpiryTime:     time.Now().UTC().Add(15 * time.Minute), +		Permissions:    to.Ptr(sas.FileSystemPermissions{Read: true, List: true}).String(), +		FileSystemName: containerName, +	}.SignWithUserDelegation(udc) +	handleError(err) + +	sasURL := fmt.Sprintf("https://%s.dfs.core.windows.net/?%s", accountName, sasQueryParams.Encode()) + +	// This URL can be used to authenticate requests now +	serviceClient, err := service.NewClientWithNoCredential(sasURL, nil) +	handleError(err) + +	// You can also break a blob URL up into its constituent parts +	blobURLParts, _ := azdatalake.ParseURL(serviceClient.DFSURL()) +	fmt.Printf("SAS expiry time = %s\n", blobURLParts.SAS.ExpiryTime()) + +	// Create Managed Identity (OAuth) Credentials using Resource ID +	optsResourceID := azidentity.ManagedIdentityCredentialOptions{ClientOptions: clientOptions, ID: azidentity.ResourceID("/subscriptions/...")} +	cred, err = azidentity.NewManagedIdentityCredential(&optsResourceID) +	handleError(err) + +	svcClient, err = service.NewClient(fmt.Sprintf("https://%s.dfs.core.windows.net/", accountName), cred, &clientOptionsService) +	handleError(err) + +	udc, err = svcClient.GetUserDelegationCredential(context.Background(), info, nil) +	handleError(err) +	fmt.Println("User Delegation Key has been created for ", 
accountName) + +	// Create Blob Signature Values with desired permissions and sign with user delegation credential +	sasQueryParams, err = sas.DatalakeSignatureValues{ +		Protocol:       sas.ProtocolHTTPS, +		StartTime:      time.Now().UTC().Add(time.Second * -10), +		ExpiryTime:     time.Now().UTC().Add(15 * time.Minute), +		Permissions:    to.Ptr(sas.FileSystemPermissions{Read: true, List: true}).String(), +		FileSystemName: containerName, +	}.SignWithUserDelegation(udc) +	handleError(err) + +	sasURL = fmt.Sprintf("https://%s.dfs.core.windows.net/?%s", accountName, sasQueryParams.Encode()) + +	// This URL can be used to authenticate requests now +	serviceClient, err = service.NewClientWithNoCredential(sasURL, nil) +	handleError(err) + +	// You can also break a blob URL up into its constituent parts +	blobURLParts, _ = azdatalake.ParseURL(serviceClient.DFSURL()) +	fmt.Printf("SAS expiry time = %s\n", blobURLParts.SAS.ExpiryTime()) +} diff --git a/sdk/storage/azdatalake/service/models.go b/sdk/storage/azdatalake/service/models.go new file mode 100644 index 000000000000..7fec5e4fc6fc --- /dev/null +++ b/sdk/storage/azdatalake/service/models.go @@ -0,0 +1,188 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "time" +) +import blobSAS "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + +// CreateFileSystemOptions contains the optional parameters for the FileSystem Create method. 
+type CreateFileSystemOptions = filesystem.CreateOptions + +// DeleteFileSystemOptions contains the optional parameters for the FileSystem Delete method. +type DeleteFileSystemOptions = filesystem.DeleteOptions + +// GetUserDelegationCredentialOptions contains optional parameters for GetUserDelegationKey method. +type GetUserDelegationCredentialOptions struct { + // placeholder for future options +} + +func (o *GetUserDelegationCredentialOptions) format() *generated_blob.ServiceClientGetUserDelegationKeyOptions { + return nil +} + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // placeholder for future options +} + +func (o *GetPropertiesOptions) format() *service.GetPropertiesOptions { + if o == nil { + return nil + } + return &service.GetPropertiesOptions{} +} + +// SetPropertiesOptions provides set of options for Client.SetProperties +type SetPropertiesOptions struct { + // CORS The set of CORS rules. + CORS []*CORSRule + // DefaultServiceVersion The default version to use for requests to the Datalake service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions. + DefaultServiceVersion *string + // DeleteRetentionPolicy the retention policy which determines how long the associated data should persist. + DeleteRetentionPolicy *RetentionPolicy + // HourMetrics a summary of request statistics grouped by API in hour or minute aggregates + // If version is not set - we default to "1.0" + HourMetrics *Metrics + // Logging Azure Analytics Logging settings. + // If version is not set - we default to "1.0" + Logging *Logging + // MinuteMetrics a summary of request statistics grouped by API in hour or minute aggregates + // If version is not set - we default to "1.0" + MinuteMetrics *Metrics + // StaticWebsite The properties that enable an account to host a static website. 
+ StaticWebsite *StaticWebsite +} + +func (o *SetPropertiesOptions) format() *service.SetPropertiesOptions { + if o == nil { + return nil + } + return &service.SetPropertiesOptions{ + CORS: o.CORS, + DefaultServiceVersion: o.DefaultServiceVersion, + DeleteRetentionPolicy: o.DeleteRetentionPolicy, + HourMetrics: o.HourMetrics, + Logging: o.Logging, + MinuteMetrics: o.MinuteMetrics, + StaticWebsite: o.StaticWebsite, + } +} + +// ListFileSystemsInclude indicates what additional information the service should return with each filesystem. +type ListFileSystemsInclude struct { + // Metadata tells the service whether to return metadata for each filesystem. + Metadata *bool + // Deleted tells the service whether to return soft-deleted filesystems. + Deleted *bool + // System tells the service whether to return system filesystems. + System *bool +} + +// ListFileSystemsOptions contains the optional parameters for the ListFileSystems method. +type ListFileSystemsOptions struct { + // Include tells the service whether to return filesystem metadata. + Include ListFileSystemsInclude + // Marker is the continuation token to use when continuing the operation. + Marker *string + // MaxResults sets the maximum number of paths that will be returned per page. + MaxResults *int32 + // Prefix filters the results to return only filesystems whose names begin with the specified prefix. + Prefix *string +} + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + // StartTime is the time after which the SAS will become valid. 
+ StartTime *time.Time +} + +func (o *GetSASURLOptions) format(resources sas.AccountResourceTypes, permissions sas.AccountPermissions) (blobSAS.AccountResourceTypes, blobSAS.AccountPermissions, *service.GetSASURLOptions) { + res := blobSAS.AccountResourceTypes{ + Service: resources.Service, + Container: resources.Container, + Object: resources.Object, + } + perms := blobSAS.AccountPermissions{ + Read: permissions.Read, + Write: permissions.Write, + Delete: permissions.Delete, + List: permissions.List, + Add: permissions.Add, + Create: permissions.Create, + Update: permissions.Update, + Process: permissions.Process, + } + if o == nil { + return res, perms, nil + } + + return res, perms, &service.GetSASURLOptions{ + StartTime: o.StartTime, + } +} + +// KeyInfo contains KeyInfo struct. +type KeyInfo = generated_blob.KeyInfo + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain. +type CORSRule = service.CORSRule + +// RetentionPolicy - the retention policy which determines how long the associated data should persist. +type RetentionPolicy = service.RetentionPolicy + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates +type Metrics = service.Metrics + +// Logging - Azure Analytics Logging settings. +type Logging = service.Logging + +// StaticWebsite - The properties that enable an account to host a static website. +type StaticWebsite = service.StaticWebsite + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// UserDelegationCredential contains an account's name and its user delegation key. 
+type UserDelegationCredential = exported.UserDelegationCredential + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = exported.UserDelegationKey + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.AccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// CPKScopeInfo contains a group of parameters for the FileSystemClient.Create method. +type CPKScopeInfo = filesystem.CPKScopeInfo + +// StorageServiceProperties - Storage Service Properties. Returned in GetServiceProperties call. +type StorageServiceProperties = service.StorageServiceProperties + +// ListFileSystemsSegmentResponse contains fields from the ListFileSystems operation +type ListFileSystemsSegmentResponse = generated_blob.ListFileSystemsSegmentResponse + +// FileSystemItem contains fields from the ListFileSystems operation +type FileSystemItem = generated_blob.FileSystemItem + +// FileSystemProperties contains fields from the ListFileSystems operation +type FileSystemProperties = generated_blob.FileSystemProperties diff --git a/sdk/storage/azdatalake/service/responses.go b/sdk/storage/azdatalake/service/responses.go new file mode 100644 index 000000000000..27ef525cf8d7 --- /dev/null +++ b/sdk/storage/azdatalake/service/responses.go @@ -0,0 +1,31 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob" +) + +// CreateFileSystemResponse contains the response fields for the CreateFileSystem operation. +type CreateFileSystemResponse = filesystem.CreateResponse + +// DeleteFileSystemResponse contains the response fields for the DeleteFileSystem operation. +type DeleteFileSystemResponse = filesystem.DeleteResponse + +// SetPropertiesResponse contains the response fields for the SetProperties operation. +type SetPropertiesResponse = service.SetPropertiesResponse + +// GetPropertiesResponse contains the response fields for the GetProperties operation. +type GetPropertiesResponse = service.GetPropertiesResponse + +// ListFileSystemsResponse contains the response fields for the ListFileSystems operation. +type ListFileSystemsResponse = generated_blob.ServiceClientListFileSystemsSegmentResponse diff --git a/sdk/storage/azdatalake/test-resources.json b/sdk/storage/azdatalake/test-resources.json new file mode 100644 index 000000000000..88c380e64a8f --- /dev/null +++ b/sdk/storage/azdatalake/test-resources.json @@ -0,0 +1,584 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "String" + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The principal to assign the role to. This is application object id." 
+ } + } + }, + "variables": { + "mgmtApiVersion": "2022-09-01", + "authorizationApiVersion": "2018-09-01-preview", + "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", + "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "immutableAccountName": "[concat(parameters('baseName'), 'imm')]", + "primaryEncryptionScopeName": "encryptionScope", + "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", + "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", + "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", + "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", + "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", + "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", + "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", + "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", + "location": "[resourceGroup().location]", + "resourceGroupName": "[resourceGroup().name]", + "subscriptionId": "[subscription().subscriptionId]", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": 
"[variables('authorizationApiVersion')]", + "name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataContributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('contributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('immutableAccountName')]", + "location": 
"[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot", + "immutableStorageWithVersioning": { + "enabled": true + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('immutableAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('immutableAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/encryptionScopes", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryEncryptionScope')]", + "properties": { + "source": "Microsoft.Storage", + "state": "Enabled" + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('secondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + 
"accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('dataLakeAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "isHnsEnabled": true, + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('dataLakeAccountName'), '/default')]", + "properties": { + "deleteRetentionPolicy": { + "allowPermanentDelete": false, + "enabled": true, + "days": 7 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('dataLakeAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('softDeleteAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "deleteRetentionPolicy": { + "allowPermanentDelete": false, + "enabled": true, + "days": 7 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/fileServices", + "apiVersion": "[variables('mgmtApiVersion')]", + 
"name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "shareDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumFileAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "FileStorage", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsPrimaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsSecondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + } + ], + "functions": [ + { + "namespace": "url", + "members": { + "serviceEndpointSuffix": { + "parameters": [ + { + "name": "endpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" + } + } + } + }, + { + 
"namespace": "connectionString", + "members": { + "create": { + "parameters": [ + { + "name": "accountName", + "type": "string" + }, + { + "name": "accountKey", + "type": "string" + }, + { + "name": "blobEndpoint", + "type": "string" + }, + { + "name": "queueEndpoint", + "type": "string" + }, + { + "name": "fileEndpoint", + "type": "string" + }, + { + "name": "tableEndpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" + } + } + } + } + ], + "outputs": { + "AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": 
"[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "BLOB_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": 
"[variables('premiumAccountName')]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('dataLakeAccountName')]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": 
"[variables('immutableAccountName')]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "IMMUTABLE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('softDeleteAccountName')]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + 
"SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumFileAccountName')]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "AZUREWEBJOBSSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), 
variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "AZUREWEBJOBSSECONDARYSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "RESOURCE_GROUP_NAME": { + "type": "string", + "value": "[variables('resourceGroupName')]" + }, + "SUBSCRIPTION_ID": { + "type": "string", + "value": "[variables('subscriptionId')]" + }, + "LOCATION": { + "type": "string", + "value": "[variables('location')]" + }, + "AZURE_STORAGE_ENCRYPTION_SCOPE": { + "type": "string", + "value": "[variables('primaryEncryptionScopeName')]" + } + } + } + \ No newline at end of file