diff --git a/.changes/2.1653.0.json b/.changes/2.1653.0.json new file mode 100644 index 0000000000..78ed9081e3 --- /dev/null +++ b/.changes/2.1653.0.json @@ -0,0 +1,17 @@ +[ + { + "type": "feature", + "category": "EC2", + "description": "Documentation updates for Elastic Compute Cloud (EC2)." + }, + { + "type": "feature", + "category": "FMS", + "description": "Increases Customer API's ManagedServiceData length" + }, + { + "type": "feature", + "category": "S3", + "description": "Added response overrides to Head Object requests." + } +] \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 25e9bd8ecd..42f5683ed7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,12 @@ # Changelog for AWS SDK for JavaScript - <!--LATEST=2.1652.0--> + <!--LATEST=2.1653.0--> +## 2.1653.0 +* feature: EC2: Documentation updates for Elastic Compute Cloud (EC2). +* feature: FMS: Increases Customer API's ManagedServiceData length +* feature: S3: Added response overrides to Head Object requests. + ## 2.1652.0 * feature: APIGateway: Add v2 smoke tests and smithy smokeTests trait for SDK testing. * feature: CognitoIdentity: Add v2 smoke tests and smithy smokeTests trait for SDK testing. diff --git a/README.md b/README.md index 215c8d98c8..2b48d09e51 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ require('aws-sdk/lib/maintenance_mode_message').suppress = true; To use the SDK in the browser, simply add the following script tag to your HTML pages: - <script src="https://sdk.amazonaws.com/js/aws-sdk-2.1652.0.min.js"></script> + <script src="https://sdk.amazonaws.com/js/aws-sdk-2.1653.0.min.js"></script> You can also build a custom browser SDK with your specified set of AWS services. This can allow you to reduce the SDK's size, specify different API versions of diff --git a/apis/ec2-2016-11-15.normal.json b/apis/ec2-2016-11-15.normal.json index 9bad28bf41..3a670550f1 100644 --- a/apis/ec2-2016-11-15.normal.json +++ b/apis/ec2-2016-11-15.normal.json @@ -12143,7 +12143,7 @@ }, "State": { "shape": "ByoipCidrState", - "documentation": "
The state of the address pool.
", + "documentation": "The state of the address range.
advertised
: The address range is being advertised to the internet by Amazon Web Services.
deprovisioned
: The address range is deprovisioned.
failed-deprovision
: The request to deprovision the address range was unsuccessful. Ensure that all EIPs from the range have been deallocated and try again.
failed-provision
: The request to provision the address range was unsuccessful.
pending-deprovision
: You've submitted a request to deprovision an address range and it's pending.
pending-provision
: You've submitted a request to provision an address range and it's pending.
provisioned
: The address range is provisioned and can be advertised. The range is not currently advertised.
provisioned-not-publicly-advertisable
: The address range is provisioned and cannot be advertised.
Indicates whether your client's IP address is preserved as the source. The value is true
or false
.
If true
, your client's IP address is used when you connect to a resource.
If false
, the elastic network interface IP address is used when you connect to a resource.
Default: true
Indicates whether the client IP address is preserved as the source. The following are the possible values.
true
- Use the client IP address as the source.
false
- Use the network interface IP address as the source.
Default: false
The Amazon Resource Name (ARN) of the Outpost.
" + "documentation": "The Amazon Resource Name (ARN) of the Outpost on which to create the volume.
If you intend to use a volume with an instance running on an Outpost, then you must create the volume on the same Outpost as the instance. You can't use a volume created in an Amazon Web Services Region with an instance on an Amazon Web Services Outpost, or the other way around.
" }, "Size": { "shape": "Integer", @@ -27767,7 +27767,7 @@ }, "VolumeIds": { "shape": "VolumeIdStringList", - "documentation": "The volume IDs.
", + "documentation": "The volume IDs. If not specified, then all volumes are included in the response.
", "locationName": "VolumeId" }, "DryRun": { @@ -35871,6 +35871,7 @@ "HostTenancy": { "type": "string", "enum": [ + "default", "dedicated", "host" ] @@ -38952,7 +38953,7 @@ }, "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { "shape": "Integer", - "documentation": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999
.
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999
.
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999
.
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999
.
The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.
You must specify VCpuCount
and MemoryMiB
. All other attributes are optional. Any unspecified optional attribute is set to its default.
When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.
To limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:
AllowedInstanceTypes
- The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.
ExcludedInstanceTypes
- The instance types to exclude from the list, even if they match your specified attributes.
If you specify InstanceRequirements
, you can't specify InstanceType
.
Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the launch instance wizard, or with the RunInstances API or AWS::EC2::Instance Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements
.
For more information, see Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide.
" @@ -45692,7 +45693,7 @@ }, "UserData": { "shape": "BlobAttributeValue", - "documentation": "Changes the instance's user data to the specified value. If you are using an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text.
", + "documentation": "Changes the instance's user data to the specified value. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.
", "locationName": "userData" }, "Value": { @@ -55324,7 +55325,7 @@ }, "UserData": { "shape": "RunInstancesUserData", - "documentation": "The user data script to make available to the instance. For more information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User Guide. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.
" + "documentation": "The user data to make available to the instance. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.
" }, "AdditionalInfo": { "shape": "String", diff --git a/apis/fms-2018-01-01.min.json b/apis/fms-2018-01-01.min.json index 5e72a9bb38..1ddfcad8b1 100644 --- a/apis/fms-2018-01-01.min.json +++ b/apis/fms-2018-01-01.min.json @@ -13,7 +13,10 @@ "serviceId": "FMS", "signatureVersion": "v4", "targetPrefix": "AWSFMS_20180101", - "uid": "fms-2018-01-01" + "uid": "fms-2018-01-01", + "auth": [ + "aws.auth#sigv4" + ] }, "operations": { "AssociateAdminAccount": { diff --git a/apis/fms-2018-01-01.normal.json b/apis/fms-2018-01-01.normal.json index 12b17c79fb..0e556c3f6d 100644 --- a/apis/fms-2018-01-01.normal.json +++ b/apis/fms-2018-01-01.normal.json @@ -13,7 +13,10 @@ "serviceId": "FMS", "signatureVersion": "v4", "targetPrefix": "AWSFMS_20180101", - "uid": "fms-2018-01-01" + "uid": "fms-2018-01-01", + "auth": [ + "aws.auth#sigv4" + ] }, "operations": { "AssociateAdminAccount": { @@ -3077,7 +3080,7 @@ }, "ManagedServiceData": { "type": "string", - "max": 10000, + "max": 30000, "min": 1, "pattern": "^((?!\\\\[nr]).)+" }, diff --git a/apis/s3-2006-03-01.examples.json b/apis/s3-2006-03-01.examples.json index e2c4125fd6..e8572583fc 100644 --- a/apis/s3-2006-03-01.examples.json +++ b/apis/s3-2006-03-01.examples.json @@ -84,10 +84,13 @@ "CreateBucket": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "CreateBucketConfiguration": { + "LocationConstraint": "eu-west-1" + } }, "output": { - "Location": "/examplebucket" + "Location": "http://examplebucket.Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds. to keep the connection alive while we copy the data.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
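As the documentation above notes, a 200 OK response can still carry an embedded error; with this SDK that case surfaces through the callback's err argument. A minimal single-request copy sketch (bucket and key names are placeholders; objects over 5 GB need the multipart UploadPartCopy path instead):

```javascript
// Sketch: a single-request copy for a source object of at most 5 GB.
var AWS = require('aws-sdk');
var s3 = new AWS.S3({apiVersion: '2006-03-01'});

s3.copyObject({
  Bucket: 'destination-bucket',
  Key: 'copied-key',
  CopySource: encodeURIComponent('source-bucket/source-key')
}, function (err, data) {
  if (err) return console.error(err); // includes errors embedded in 200 OK
  console.log('copy ETag:', data.CopyObjectResult.ETag);
});
```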
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body.
When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always specify the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versiong-enabled bucket, you must specify the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or a additional checksum request header (including x-amz-checksum-crc32
, x-amz-checksum-crc32c
, x-amz-checksum-sha1
, or x-amz-checksum-sha256
) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to DeleteObjects
:
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body.
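A quiet-mode sketch matching the verbose/quiet description above; bucket and keys are placeholders, and the SDK supplies the required Content-MD5 header on the caller's behalf:

```javascript
// Sketch: quiet-mode multi-object delete. Only keys that failed to
// delete come back in Errors.
var AWS = require('aws-sdk');
var s3 = new AWS.S3({apiVersion: '2006-03-01'});

s3.deleteObjects({
  Bucket: 'examplebucket',
  Delete: {
    Objects: [{Key: 'objectkey1'}, {Key: 'objectkey2'}],
    Quiet: true
  }
}, function (err, data) {
  if (err) return console.error(err);
  (data.Errors || []).forEach(function (e) {
    console.log('failed:', e.Key, e.Code, e.Message);
  });
});
```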
When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always specify the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32
, x-amz-checksum-crc32c
, x-amz-checksum-sha1
, or x-amz-checksum-sha256
) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to DeleteObjects
:
Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source
in your request. To specify a byte range, you add the request header x-amz-copy-source-range
in your request.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have READ
access to the source object and WRITE
access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy
operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject
permission to write the object copy to the destination bucket.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object . By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key cannot be set to ReadOnly
on the copy destination.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPartCopy
:
Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source
in your request. To specify a byte range, you add the request header x-amz-copy-source-range
in your request.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
You must have READ
access to the source object and WRITE
access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy
operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject
permission to write the object copy to the destination bucket.
For information about permissions required to use the multipart upload API, see Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key cannot be set to ReadOnly
on the copy destination.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
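A sketch of the byte-range copy flow this operation documents; the upload ID, buckets, and keys are placeholders, and createMultipartUpload must have been called first to obtain the upload ID:

```javascript
// Sketch: copy the first 5 MiB of an existing object in as part 1 of a
// multipart upload.
var AWS = require('aws-sdk');
var s3 = new AWS.S3({apiVersion: '2006-03-01'});

s3.uploadPartCopy({
  Bucket: 'destination-bucket',
  Key: 'large-object',
  UploadId: 'example-upload-id',       // from createMultipartUpload
  PartNumber: 1,
  CopySource: encodeURIComponent('source-bucket/source-key'),
  CopySourceRange: 'bytes=0-5242879'   // inclusive byte range
}, function (err, data) {
  if (err) return console.error(err);
  console.log('part ETag:', data.CopyPartResult.ETag);
});
```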
.
The following operations are related to UploadPartCopy
:
The established temporary security credentials for the created session..
", + "documentation": "The established temporary security credentials for the created session.
", "locationName": "Credentials" } } @@ -6667,6 +6671,42 @@ "location": "header", "locationName": "Range" }, + "ResponseCacheControl": { + "shape": "ResponseCacheControl", + "documentation": "Sets the Cache-Control
header of the response.
Sets the Content-Disposition
header of the response.
Sets the Content-Encoding
header of the response.
Sets the Content-Language
header of the response.
Sets the Content-Type
header of the response.
Sets the Expires
header of the response.
Version ID used to reference a specific version of the object.
For directory buckets in this API operation, only the null
value of the version ID is supported.
A suffix that is appended to a request that is for a directory on the website endpoint (for example,if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.
Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
A suffix that is appended to a request that is for a directory on the website endpoint. (For example, if the suffix is index.html
and you make a request to samplebucket/images/
, the data that is returned will be for the object with the key name images/index.html
.) The suffix must not be empty and must not include a slash character.
Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
Container for the Suffix
element.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
" + "documentation": "Specifies how many noncurrent versions Amazon S3 will retain. You can specify up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
" } }, "documentation": "Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.
" @@ -8524,7 +8564,7 @@ }, "NewerNoncurrentVersions": { "shape": "VersionCount", - "documentation": "Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
" + "documentation": "Specifies how many noncurrent versions Amazon S3 will retain in the same storage class before transitioning objects. You can specify up to 100 noncurrent versions to retain. Amazon S3 will transition any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
" } }, "documentation": "Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA
, ONEZONE_IA
, INTELLIGENT_TIERING
, GLACIER_IR
, GLACIER
, or DEEP_ARCHIVE
storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
, ONEZONE_IA
, INTELLIGENT_TIERING
, GLACIER_IR
, GLACIER
, or DEEP_ARCHIVE
storage class at a specific period in the object's lifetime.
>1,l=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,d=a?0:s-1,y=a?1:-1,b=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(n=isNaN(t)?1:0,o=m):(o=Math.floor(Math.log(t)/Math.LN2),t*(u=Math.pow(2,-o))<1&&(o--,u*=2),t+=o+c>=1?l/u:l*Math.pow(2,1-c),t*u>=2&&(o++,u/=2),o+c>=m?(n=0,o=m):o+c>=1?(n=(t*u-1)*Math.pow(2,i),o+=c):(n=t*Math.pow(2,c-1)*Math.pow(2,i),o=0));i>=8;e[r+d]=255&n,d+=y,n/=256,i-=8);for(o=o<0;e[r+d]=255&o,d+=y,o/=256,p-=8);e[r+d-y]|=128*b}},{}],443:[function(e,t,r){var a={}.toString;t.exports=Array.isArray||function(e){return"[object Array]"==a.call(e)}},{}],444:[function(e,t,r){!function(e){"use strict";function t(e){return null!==e&&"[object Array]"===Object.prototype.toString.call(e)}function r(e){return null!==e&&"[object Object]"===Object.prototype.toString.call(e)}function a(e,i){if(e===i)return!0;if(Object.prototype.toString.call(e)!==Object.prototype.toString.call(i))return!1;if(!0===t(e)){if(e.length!==i.length)return!1;for(var s=0;s G((f-r)/g)&&i("overflow"),r+=(p-t)*g,t=p,u=0;u =0?(c=b.substr(0,S),l=b.substr(S+1)):(c=b,l=""),d=decodeURIComponent(c),y=decodeURIComponent(l),a(o,d)?i(o[d])?o[d].push(y):o[d]=[o[d],y]:o[d]=y}return o};var i=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},{}],448:[function(e,t,r){"use strict";function a(e,t){if(e.map)return e.map(t);for(var r=[],a=0;a >1,l=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,d=a?0:s-1,y=a?1:-1,b=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(n=isNaN(t)?1:0,o=m):(o=Math.floor(Math.log(t)/Math.LN2),t*(u=Math.pow(2,-o))<1&&(o--,u*=2),t+=o+c>=1?l/u:l*Math.pow(2,1-c),t*u>=2&&(o++,u/=2),o+c>=m?(n=0,o=m):o+c>=1?(n=(t*u-1)*Math.pow(2,i),o+=c):(n=t*Math.pow(2,c-1)*Math.pow(2,i),o=0));i>=8;e[r+d]=255&n,d+=y,n/=256,i-=8);for(o=o<0;e[r+d]=255&o,d+=y,o/=256,p-=8);e[r+d-y]|=128*b}},{}],443:[function(e,t,r){var a={}.toString;t.exports=Array.isArray||function(e){return"[object Array]"==a.call(e)}},{}],444:[function(e,t,r){!function(e){"use strict";function t(e){return null!==e&&"[object Array]"===Object.prototype.toString.call(e)}function r(e){return null!==e&&"[object Object]"===Object.prototype.toString.call(e)}function a(e,i){if(e===i)return!0;if(Object.prototype.toString.call(e)!==Object.prototype.toString.call(i))return!1;if(!0===t(e)){if(e.length!==i.length)return!1;for(var s=0;s G((f-r)/g)&&i("overflow"),r+=(p-t)*g,t=p,u=0;u =0?(c=b.substr(0,S),l=b.substr(S+1)):(c=b,l=""),d=decodeURIComponent(c),y=decodeURIComponent(l),a(o,d)?i(o[d])?o[d].push(y):o[d]=[o[d],y]:o[d]=y}return o};var i=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},{}],448:[function(e,t,r){"use strict";function a(e,t){if(e.map)return e.map(t);for(var r=[],a=0;a=55296&&t<=56319&&i65535&&(e-=65536,t+=w(e>>>10&1023|55296),e=56320|1023&e),t+=w(e)}).join("")}function p(e){return e-48<10?e-22:e-65<26?e-65:e-97<26?e-97:T}function m(e,t){return e+22+75*(e<26)-((0!=t)<<5)}function c(e,t,r){var a=0;for(e=r?G(e/R):e>>1,e+=G(e/t);e>L*k>>1;a+=T)e=G(e/L);return G(a+(L+1)*e/(e+A))}function l(e){var t,r,a,s,o,n,m,l,d,y,b=[],S=e.length,g=0,h=v,I=D;for(r=e.lastIndexOf(x),r<0&&(r=0),a=0;a=S&&i("invalid-input"),l=p(e.charCodeAt(s++)),(l>=T||l>G((f-g)/n))&&i("overflow"),g+=l*n,d=m<=I?C:m>=I+k?k:m-I,!(l=t&&bf&&i("overflow"),b==t){for(l=r,d=T;y=d<=o?C:d>=o+k?k:d-o,!(l=0&&delete e.httpRequest.headers["Content-Length"]}function i(e){var t=new d,r=e.service.api.operations[e.operation].input;if(r.payload){var 
a={},i=r.members[r.payload];a=e.params[r.payload],"structure"===i.type?(e.httpRequest.body=t.build(a||{},i),s(e)):void 0!==a&&(e.httpRequest.body=a,("binary"===i.type||i.isStreaming)&&s(e,!0))}else e.httpRequest.body=t.build(e.params,r),s(e)}function s(e,t){if(!e.httpRequest.headers["Content-Type"]){var r=t?"binary/octet-stream":"application/json";e.httpRequest.headers["Content-Type"]=r}}function o(e){c.buildRequest(e),b.indexOf(e.httpRequest.method)<0&&i(e)}function n(e){l.extractError(e)}function u(e){c.extractData(e);var t,r=e.request,a=r.service.api.operations[r.operation],i=r.service.api.operations[r.operation].output||{};a.hasEventOutput;if(i.payload){var s=i.members[i.payload],o=e.httpResponse.body;if(s.isEventStream)t=new y,e.data[i.payload]=m.createEventStream(2===p.HttpClient.streamsApiVersion?e.httpResponse.stream:o,t,s);else if("structure"===s.type||"list"===s.type){var t=new y;e.data[i.payload]=t.parse(o,s)}else"binary"===s.type||s.isStreaming?e.data[i.payload]=o:e.data[i.payload]=s.toType(o)}else{var n=e.data;l.extractData(e),e.data=m.merge(n,e.data)}}var p=e("../core"),m=e("../util"),c=e("./rest"),l=e("./json"),d=e("../json/builder"),y=e("../json/parser"),b=["GET","HEAD","DELETE"];t.exports={buildRequest:o,extractError:n,extractData:u,unsetContentLength:a}},{"../core":350,"../json/builder":374,"../json/parser":375,"../util":428,"./json":386,"./rest":388}],390:[function(e,t,r){function a(e){var t=e.service.api.operations[e.operation].input,r=new n.XML.Builder,a=e.params,i=t.payload;if(i){var s=t.members[i];if(void 0===(a=a[i]))return;if("structure"===s.type){var o=s.name;e.httpRequest.body=r.toXML(a,s,o,!0)}else e.httpRequest.body=a}else e.httpRequest.body=r.toXML(a,t,t.name||t.shape||u.string.upperFirst(e.operation)+"Request")}function i(e){p.buildRequest(e),["GET","HEAD"].indexOf(e.httpRequest.method)<0&&a(e)}function s(e){p.extractError(e);var t;try{t=(new n.XML.Parser).parse(e.httpResponse.body.toString())}catch(r){t={Code:e.httpResponse.statusCode,Message:e.httpResponse.statusMessage}}t.Errors&&(t=t.Errors),t.Error&&(t=t.Error),t.Code?e.error=u.error(new Error,{code:t.Code,message:t.Message}):e.error=u.error(new Error,{code:e.httpResponse.statusCode,message:null})}function o(e){p.extractData(e);var t,r=e.request,a=e.httpResponse.body,i=r.service.api.operations[r.operation],s=i.output,o=(i.hasEventOutput,s.payload);if(o){var m=s.members[o];m.isEventStream?(t=new n.XML.Parser,e.data[o]=u.createEventStream(2===n.HttpClient.streamsApiVersion?e.httpResponse.stream:e.httpResponse.body,t,m)):"structure"===m.type?(t=new n.XML.Parser,e.data[o]=t.parse(a.toString(),m)):"binary"===m.type||m.isStreaming?e.data[o]=a:e.data[o]=m.toType(a)}else if(a.length>0){t=new n.XML.Parser;var c=t.parse(a.toString(),s);u.update(e.data,c)}}var n=e("../core"),u=e("../util"),p=e("./rest");t.exports={buildRequest:i,extractError:s,extractData:o}},{"../core":350,"../util":428,"./rest":388}],391:[function(e,t,r){function a(){}function i(e){return e.isQueryName||"ec2"!==e.api.protocol?e.name:e.name[0].toUpperCase()+e.name.substr(1)}function s(e,t,r,a){p.each(r.members,function(r,s){var o=t[r];if(null!==o&&void 0!==o){var n=i(s);n=e?e+"."+n:n,u(n,o,s,a)}})}function o(e,t,r,a){var i=1;p.each(t,function(t,s){var o=r.flattened?".":".entry.",n=o+i+++".",p=n+(r.key.name||"key"),m=n+(r.value.name||"value");u(e+p,t,r.key,a),u(e+m,s,r.value,a)})}function n(e,t,r,a){var s=r.member||{};if(0===t.length)return void("ec2"!==r.api.protocol&&a.call(this,e,null));p.arrayEach(t,function(t,o){var 
n="."+(o+1);if("ec2"===r.api.protocol)n+="";else if(r.flattened){if(s.name){var p=e.split(".");p.pop(),p.push(i(s)),e=p.join(".")}}else n="."+(s.name?s.name:"member")+n;u(e+n,t,s,a)})}function u(e,t,r,a){null!==t&&void 0!==t&&("structure"===r.type?s(e,t,r,a):"list"===r.type?n(e,t,r,a):"map"===r.type?o(e,t,r,a):a(e,r.toWireFormat(t).toString()))}var p=e("../util");a.prototype.serialize=function(e,t,r){s("",e,t,r)},t.exports=a},{"../util":428}],392:[function(e,t,r){var a=e("../core"),i=null,s={signatureVersion:"v4",signingName:"rds-db",operations:{}},o={region:"string",hostname:"string",port:"number",username:"string"};a.RDS.Signer=a.util.inherit({constructor:function(e){this.options=e||{}},convertUrlToAuthToken:function(e){if(0===e.indexOf("https://"))return e.substring("https://".length)},getAuthToken:function(e,t){"function"==typeof e&&void 0===t&&(t=e,e={});var r=this,o="function"==typeof t;e=a.util.merge(this.options,e);var n=this.validateAuthTokenOptions(e);if(!0!==n){if(o)return t(n,null);throw n}var u={region:e.region,endpoint:new a.Endpoint(e.hostname+":"+e.port),paramValidation:!1,signatureVersion:"v4"};e.credentials&&(u.credentials=e.credentials),i=new a.Service(u),i.api=s;var p=i.makeRequest();if(this.modifyRequestForAuthToken(p,e),!o){var m=p.presign(900);return this.convertUrlToAuthToken(m)}p.presign(900,function(e,a){a&&(a=r.convertUrlToAuthToken(a)),t(e,a)})},modifyRequestForAuthToken:function(e,t){e.on("build",e.buildAsGet),e.httpRequest.body=a.util.queryParamsToString({Action:"connect",DBUser:t.username})},validateAuthTokenOptions:function(e){var t="";e=e||{};for(var r in o)Object.prototype.hasOwnProperty.call(o,r)&&typeof e[r]!==o[r]&&(t+="option '"+r+"' should have been type '"+o[r]+"', was '"+typeof e[r]+"'.\n");return!t.length||a.util.error(new Error,{code:"InvalidParameter",message:t})}})},{"../core":350}],393:[function(e,t,r){t.exports={now:function(){return"undefined"!=typeof performance&&"function"==typeof performance.now?performance.now():Date.now()}}},{}],394:[function(e,t,r){function a(e){return"string"==typeof e&&(e.startsWith("fips-")||e.endsWith("-fips"))}function i(e){return"string"==typeof e&&["aws-global","aws-us-gov-global"].includes(e)}function s(e){return["fips-aws-global","aws-fips","aws-global"].includes(e)?"us-east-1":["fips-aws-us-gov-global","aws-us-gov-global"].includes(e)?"us-gov-west-1":e.replace(/fips-(dkr-|prod-)?|-fips/,"")}t.exports={isFipsRegion:a,isGlobalRegion:i,getRealRegion:s}},{}],395:[function(e,t,r){function a(e){if(!e)return null;var t=e.split("-");return t.length<3?null:t.slice(0,t.length-2).join("-")+"-*"}function i(e){var t=e.config.region,r=a(t),i=e.api.endpointPrefix;return[[t,i],[r,i],[t,"*"],[r,"*"],["*",i],[t,"internal-*"],["*","*"]].map(function(e){return e[0]&&e[1]?e.join("/"):null})}function s(e,t){u.each(t,function(t,r){"globalEndpoint"!==t&&(void 0!==e.config[t]&&null!==e.config[t]||(e.config[t]=r))})}function o(e){for(var t=i(e),r=e.config.useFipsEndpoint,a=e.config.useDualstackEndpoint,o=0;o=a())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+a().toString(16)+" bytes");return 0|e}function b(e){return+e!=e&&(e=0),s.alloc(+e)}function S(e,t){if(s.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var r=e.length;if(0===r)return 0;for(var a=!1;;)switch(t){case"ascii":case"latin1":case"binary":return r;case"utf8":case"utf-8":case void 0:return 
K(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*r;case"hex":return r>>>1;case"base64":return H(e).length;default:if(a)return K(e).length;t=(""+t).toLowerCase(),a=!0}}function g(e,t,r){var a=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===r||r>this.length)&&(r=this.length),r<=0)return"";if(r>>>=0,t>>>=0,r<=t)return"";for(e||(e="utf8");;)switch(e){case"hex":return E(this,t,r);case"utf8":case"utf-8":return v(this,t,r);case"ascii":return P(this,t,r);case"latin1":case"binary":return q(this,t,r);case"base64":return D(this,t,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return M(this,t,r);default:if(a)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),a=!0}}function h(e,t,r){var a=e[t];e[t]=e[r],e[r]=a}function I(e,t,r,a,i){if(0===e.length)return-1;if("string"==typeof r?(a=r,r=0):r>2147483647?r=2147483647:r<-2147483648&&(r=-2147483648),r=+r,isNaN(r)&&(r=i?0:e.length-1),r<0&&(r=e.length+r),r>=e.length){if(i)return-1;r=e.length-1}else if(r<0){if(!i)return-1;r=0}if("string"==typeof t&&(t=s.from(t,a)),s.isBuffer(t))return 0===t.length?-1:N(e,t,r,a,i);if("number"==typeof t)return t&=255,s.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?i?Uint8Array.prototype.indexOf.call(e,t,r):Uint8Array.prototype.lastIndexOf.call(e,t,r):N(e,[t],r,a,i);throw new TypeError("val must be string, number or Buffer")}function N(e,t,r,a,i){function s(e,t){return 1===o?e[t]:e.readUInt16BE(t*o)}var o=1,n=e.length,u=t.length;if(void 0!==a&&("ucs2"===(a=String(a).toLowerCase())||"ucs-2"===a||"utf16le"===a||"utf-16le"===a)){if(e.length<2||t.length<2)return-1;o=2,n/=2,u/=2,r/=2}var p;if(i){var m=-1;for(p=r;p>>8*(a?i:1-i)}function B(e,t,r,a){t<0&&(t=4294967295+t+1);for(var i=0,s=Math.min(e.length-r,4);i>>8*(a?i:3-i)&255}function U(e,t,r,a,i,s){if(r+a>e.length)throw new RangeError("Index out of range");if(r<0)throw new RangeError("Index out of range")}function _(e,t,r,a,i){return i||U(e,t,r,4,3.4028234663852886e38,-3.4028234663852886e38),X.write(e,t,r,a,23,4),r+4}function F(e,t,r,a,i){return i||U(e,t,r,8,1.7976931348623157e308,-1.7976931348623157e308),X.write(e,t,r,a,52,8),r+8}function O(e){if(e=V(e).replace(ee,""),e.length<2)return"";for(;e.length%4!=0;)e+="=";return e}function V(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}function z(e){return e<16?"0"+e.toString(16):e.toString(16)}function K(e,t){t=t||1/0;for(var r,a=e.length,i=null,s=[],o=0;o55295&&r<57344){if(!i){if(r>56319){(t-=3)>-1&&s.push(239,191,189);continue}if(o+1===a){(t-=3)>-1&&s.push(239,191,189);continue}i=r;continue}if(r<56320){(t-=3)>-1&&s.push(239,191,189),i=r;continue}r=65536+(i-55296<<10|r-56320)}else i&&(t-=3)>-1&&s.push(239,191,189);if(i=null,r<128){if((t-=1)<0)break;s.push(r)}else if(r<2048){if((t-=2)<0)break;s.push(r>>6|192,63&r|128)}else if(r<65536){if((t-=3)<0)break;s.push(r>>12|224,r>>6&63|128,63&r|128)}else{if(!(r<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;s.push(r>>18|240,r>>12&63|128,r>>6&63|128,63&r|128)}}return s}function j(e){for(var t=[],r=0;r=55296&&t<=56319&&i65535&&(e-=65536,t+=w(e>>>10&1023|55296),e=56320|1023&e),t+=w(e)}).join("")}function p(e){return e-48<10?e-22:e-65<26?e-65:e-97<26?e-97:T}function m(e,t){return e+22+75*(e<26)-((0!=t)<<5)}function c(e,t,r){var a=0;for(e=r?G(e/R):e>>1,e+=G(e/t);e>L*k>>1;a+=T)e=G(e/L);return G(a+(L+1)*e/(e+A))}function l(e){var 
t,r,a,s,o,n,m,l,d,y,b=[],S=e.length,g=0,h=v,I=D;for(r=e.lastIndexOf(x),r<0&&(r=0),a=0;a=S&&i("invalid-input"),l=p(e.charCodeAt(s++)),(l>=T||l>G((f-g)/n))&&i("overflow"),g+=l*n,d=m<=I?C:m>=I+k?k:m-I,!(l=t&&bf&&i("overflow"),b==t){for(l=r,d=T;y=d<=o?C:d>=o+k?k:d-o,!(l