From 57e8a37099a463f9d4ecc5cdd04a7838bba4be59 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Mon, 8 Mar 2021 11:26:05 -0800 Subject: [PATCH] Release v1.37.26 (2021-03-08) (#3813) Release v1.37.26 (2021-03-08) === ### Service Client Updates * `service/autoscaling`: Updates service documentation * Documentation updates for autoscaling for capacity-optimized-prioritized SpotAllocationStrategy * `service/elasticmapreduce`: Updates service API and documentation * Amazon EMR customers can now specify how EC2 On-Demand Capacity Reservations are used in their EMR clusters with instance fleets using allocation strategy. * `service/kinesis-video-archived-media`: Updates service API and documentation * `service/lambda`: Updates service API and documentation * Documentation updates for Lambda. Constraint updates to AddLayerVersionPermission's Action and OrganizationId parameters, and AddPermission's Principal and SourceAccount parameters. * `service/s3`: Updates service documentation and examples * Amazon S3 Documentation updates * `service/s3control`: Updates service API and documentation * Documentation updates for Amazon S3 --- CHANGELOG.md | 16 + aws/version.go | 2 +- .../apis/autoscaling/2011-01-01/docs-2.json | 16 +- .../elasticmapreduce/2009-03-31/api-2.json | 21 +- .../elasticmapreduce/2009-03-31/docs-2.json | 28 +- .../2017-09-30/api-2.json | 26 +- .../2017-09-30/docs-2.json | 56 +- models/apis/lambda/2015-03-31/api-2.json | 5 +- models/apis/lambda/2015-03-31/docs-2.json | 4 +- models/apis/s3/2006-03-01/docs-2.json | 202 ++--- models/apis/s3/2006-03-01/examples-1.json | 244 +++--- models/apis/s3control/2018-08-20/api-2.json | 4 +- models/apis/s3control/2018-08-20/docs-2.json | 78 +- service/autoscaling/api.go | 48 +- service/emr/api.go | 113 ++- service/kinesisvideoarchivedmedia/api.go | 121 +-- service/kinesisvideoarchivedmedia/errors.go | 3 +- service/lambda/api.go | 12 +- service/s3/api.go | 818 +++++++++--------- service/s3/errors.go | 6 +- service/s3/examples_test.go | 166 ++-- service/s3/s3manager/upload_input.go | 22 +- service/s3control/api.go | 192 ++-- service/s3control/doc.go | 2 +- 24 files changed, 1178 insertions(+), 1027 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4fb463f352..3742a9136a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.37.26 (2021-03-08) +=== + +### Service Client Updates +* `service/autoscaling`: Updates service documentation + * Documentation updates for autoscaling for capacity-optimized-prioritized SpotAllocationStrategy +* `service/elasticmapreduce`: Updates service API and documentation + * Amazon EMR customers can now specify how EC2 On-Demand Capacity Reservations are used in their EMR clusters with instance fleets using allocation strategy. +* `service/kinesis-video-archived-media`: Updates service API and documentation +* `service/lambda`: Updates service API and documentation + * Documentation updates for Lambda. Constraint updates to AddLayerVersionPermission's Action and OrganizationId parameters, and AddPermission's Principal and SourceAccount parameters. 
+* `service/s3`: Updates service documentation and examples + * Amazon S3 Documentation updates +* `service/s3control`: Updates service API and documentation + * Documentation updates for Amazon S3 + Release v1.37.25 (2021-03-05) === diff --git a/aws/version.go b/aws/version.go index 73bd73ea143..e31f79a3ad9 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.37.25" +const SDKVersion = "1.37.26" diff --git a/models/apis/autoscaling/2011-01-01/docs-2.json b/models/apis/autoscaling/2011-01-01/docs-2.json index 971db9c3991..a2940f68634 100644 --- a/models/apis/autoscaling/2011-01-01/docs-2.json +++ b/models/apis/autoscaling/2011-01-01/docs-2.json @@ -809,7 +809,7 @@ "InstancesDistribution": { "base": "
Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy.
The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities.
When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.
Specifies the instances distribution. If not provided, the value for each parameter in InstancesDistribution uses a default value.
Specifies the instances distribution. If not provided, the value for each property in InstancesDistribution uses a default value.
Describes a launch template and overrides.
You specify these parameters as part of a mixed instances policy.
When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.
", + "base": "Describes a launch template and overrides.
You specify these properties as part of a mixed instances policy.
When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.
", "refs": { "MixedInstancesPolicy$LaunchTemplate": "Specifies the launch template to use and optionally the instance types (overrides) that are used to provision EC2 instances to fulfill On-Demand and Spot capacities. Required when creating a mixed instances policy.
" } @@ -1143,11 +1143,11 @@ } }, "MixedInstancesPolicy": { - "base": "Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or launch template.
Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level property instead of a launch configuration or launch template.
The mixed instances policy for the group.
", - "CreateAutoScalingGroupType$MixedInstancesPolicy": "An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.
The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the parameters that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
", - "UpdateAutoScalingGroupType$MixedInstancesPolicy": "An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional parameters are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
" + "CreateAutoScalingGroupType$MixedInstancesPolicy": "An embedded object that specifies a mixed instances policy. The required properties must be specified. If optional properties are unspecified, their default values are used.
The policy includes properties that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the properties that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
", + "UpdateAutoScalingGroupType$MixedInstancesPolicy": "An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional properties are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
" } }, "MonitoringEnabled": { @@ -1209,7 +1209,7 @@ "Overrides": { "base": null, "refs": { - "LaunchTemplate$Overrides": "Any parameters that you specify override the same parameters in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.
" + "LaunchTemplate$Overrides": "Any properties that you specify override the same properties in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.
" } }, "PoliciesType": { @@ -1672,8 +1672,8 @@ "DescribeTagsType$NextToken": "The token for the next set of items to return. (You received this token from a previous call.)
", "FailedScheduledUpdateGroupActionRequest$ErrorMessage": "The error message accompanying the error code.
", "Filter$Name": "The name of the filter. The valid values are: auto-scaling-group
, key, value, and propagate-at-launch.
Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the overrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.
Indicates how to allocate instances across Spot Instance pools. If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified.
Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the LaunchTemplateOverrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.
Indicates how to allocate instances across Spot Instance pools.
If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified.
If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized and set the order of instance types in the list of launch template overrides from highest to lowest priority (from first to last in the list). Amazon EC2 Auto Scaling honors the instance type priorities on a best-effort basis but optimizes for capacity first.
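For SDK users, this strategy is set through the InstancesDistribution of a MixedInstancesPolicy. The following is a minimal sketch with this SDK; the group name, launch template name, and instance types are placeholders, and the overrides are ordered from highest to lowest launch priority as the description above requires.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	// Update an existing group to use capacity-optimized-prioritized.
	_, err := svc.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("my-asg"), // placeholder
		MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{
			LaunchTemplate: &autoscaling.LaunchTemplate{
				LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{
					LaunchTemplateName: aws.String("my-template"), // placeholder
					Version:            aws.String("$Latest"),
				},
				// Listed from highest to lowest priority.
				Overrides: []*autoscaling.LaunchTemplateOverrides{
					{InstanceType: aws.String("c5.large")},
					{InstanceType: aws.String("c5a.large")},
					{InstanceType: aws.String("m5.large")},
				},
			},
			InstancesDistribution: &autoscaling.InstancesDistribution{
				SpotAllocationStrategy: aws.String("capacity-optimized-prioritized"),
			},
		},
	})
	if err != nil {
		fmt.Println("UpdateAutoScalingGroup failed:", err)
	}
}
```

Per the update semantics described earlier, changing SpotAllocationStrategy this way does not replace running instances; the new strategy takes effect on the next scale-out.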
The token for the next set of items to return. (You received this token from a previous call.)
", "LaunchConfigurationsType$NextToken": "A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken
value when requesting the next set of items. This value is null when there are no more items to return.
A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken
value when requesting the next set of items. This value is null when there are no more items to return.
The number of EC2 instances in the cluster.
", "JobFlowInstancesDetail$InstanceCount": "The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.
", "JobFlowInstancesDetail$NormalizedInstanceHours": "An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased incrementally four times. This result is only an approximation and does not reflect the actual billing rate.
", - "ModifyClusterInput$StepConcurrencyLevel": "The number of steps that can be executed concurrently. You can specify a maximum of 256 steps.
", + "ModifyClusterInput$StepConcurrencyLevel": "The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps.
", "ModifyClusterOutput$StepConcurrencyLevel": "The number of steps that can be executed concurrently.
", "RunJobFlowInput$EbsRootVolumeSize": "The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.
", "RunJobFlowInput$StepConcurrencyLevel": "Specifies the number of steps that can be executed concurrently. The default value is 1
. The maximum value is 256
.
A list of notebook executions.
" } }, + "OnDemandCapacityReservationOptions": { + "base": "Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.
", + "refs": { + "OnDemandProvisioningSpecification$CapacityReservationOptions": "The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy.
" + } + }, + "OnDemandCapacityReservationPreference": { + "base": null, + "refs": { + "OnDemandCapacityReservationOptions$CapacityReservationPreference": "Indicates the instance's Capacity Reservation preferences. Possible preferences include:
open
- The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).
none
- The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.
Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.
If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price).
If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.
" + } + }, "OnDemandProvisioningAllocationStrategy": { "base": null, "refs": { - "OnDemandProvisioningSpecification$AllocationStrategy": "Specifies the strategy to use in launching On-Demand Instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.
" + "OnDemandProvisioningSpecification$AllocationStrategy": "Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price
(the default), which launches the lowest price first.
The identifier of the instance group to which this instance belongs.
", "InstanceFleetStateChangeReason$Message": "An explanatory message.
", "InstanceGroup$Name": "The name of the instance group.
", - "InstanceGroup$BidPrice": "The bid price for each EC2 Spot Instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice
to set the amount equal to the On-Demand price, or specify an amount in USD.
The status change reason description.
", "InstanceStateChangeReason$Message": "The status change reason description.
", "MetricDimension$Key": "The dimension name.
", @@ -2049,10 +2067,10 @@ "InstanceFleet$Name": "A friendly name for the instance fleet.
", "InstanceFleetConfig$Name": "The friendly name of the instance fleet.
", "InstanceGroupConfig$Name": "Friendly name given to the instance group.
", - "InstanceGroupConfig$BidPrice": "The bid price for each EC2 Spot Instance as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice
to set the amount equal to the On-Demand price, or specify an amount in USD.
Unique identifier for the instance group.
", "InstanceGroupDetail$Name": "Friendly name for the instance group.
", - "InstanceGroupDetail$BidPrice": "The bid price for each EC2 Spot Instance as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice
to set the amount equal to the On-Demand price, or specify an amount in USD.
Unique ID of the instance group to modify.
", "InstanceTypeConfig$BidPrice": "The bid price for each EC2 Spot Instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
Downloads an MP4 file (clip) containing the archived, on-demand media from the specified video stream over the specified time range.
Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.
As a prerequisite to using GetCLip API, you must obtain an endpoint using GetDataEndpoint
, specifying GET_CLIP for the
APIName
parameter.
An Amazon Kinesis video stream has the following requirements for providing data through MP4:
The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC
(for AAC) or A_MS/ACM (for G.711).
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.
You can monitor the amount of outgoing data by monitoring the GetClip.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for outgoing AWS data apply.
Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.
Both the StreamName
and the StreamARN
parameters are optional, but you must specify either the StreamName
or the StreamARN
when invoking this API operation.
An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:
The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC
(for AAC) or A_MS/ACM (for G.711).
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.
The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:
Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL
for the APIName
parameter.
Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL
. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL
returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).
Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.
The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.
The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback.
GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
Data retrieved with this action is billable. See Pricing for details.
The following restrictions apply to MPEG-DASH sessions:
A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.
A Kinesis video stream can have a maximum of ten active MPEG-DASH streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia
connections on a Kinesis video stream does not count against this limit, and the number of active MPEG-DASH sessions does not count against the active GetMedia
connection limit.
The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.
You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.
Both the StreamName
and the StreamARN
parameters are optional, but you must specify either the StreamName
or the StreamARN
when invoking this API operation.
An Amazon Kinesis video stream has the following requirements for providing data through HLS:
The media must contain h.264 or h.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEG/ISO/HEVC
(for h.265). Optionally, the codec ID of track 2 should be A_AAC
.
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).
Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.
The following procedure shows how to use HLS with Kinesis Video Streams:
Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL
for the APIName
parameter.
Retrieve the HLS URL using GetHLSStreamingSessionURL
. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL
returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).
Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.
The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.
The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist
action for each track, and additional metadata for the media player, including estimated bitrate and resolution.
GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment
action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment
actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode
is LIVE
or ON_DEMAND
. The HLS media playlist is typically static for sessions with a PlaybackType
of ON_DEMAND
. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType
of LIVE
. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.
GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
Data retrieved with this action is billable. See Pricing for details.
GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.
If the ContainerFormat
is MPEG_TS
, this API is used instead of GetMP4InitFragment
and GetMP4MediaFragment
to retrieve stream media.
Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.
The following restrictions apply to HLS sessions:
A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.
A Kinesis video stream can have a maximum of ten active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia
connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia
connection limit.
The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.
You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
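The HLS procedure is the same two-step flow as MPEG-DASH; only the API name and the output field differ. A minimal sketch with a placeholder stream name:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideo"
	"github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

func main() {
	sess := session.Must(session.NewSession())

	// Step 1: resolve the endpoint that serves HLS sessions for this stream.
	ep, err := kinesisvideo.New(sess).GetDataEndpoint(&kinesisvideo.GetDataEndpointInput{
		StreamName: aws.String("my-stream"), // placeholder
		APIName:    aws.String("GET_HLS_STREAMING_SESSION_URL"),
	})
	if err != nil {
		panic(err)
	}

	// Step 2: create the HLS session and print the master playlist URL.
	svc := kinesisvideoarchivedmedia.New(sess, aws.NewConfig().WithEndpoint(*ep.DataEndpoint))
	out, err := svc.GetHLSStreamingSessionURL(&kinesisvideoarchivedmedia.GetHLSStreamingSessionURLInput{
		StreamName:   aws.String("my-stream"),
		PlaybackMode: aws.String("LIVE"),
	})
	if err != nil {
		panic(err)
	}
	// The URL embeds the encrypted session token; do not share or store it.
	fmt.Println(*out.HLSStreamingSessionURL)
}
```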
Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.
You must first call the GetDataEndpoint
API to get an endpoint. Then send the GetMediaForFragmentList
requests to this endpoint using the --endpoint-url parameter.
The following limits apply when using the GetMediaForFragmentList
API:
A client can call GetMediaForFragmentList
up to five times per second per stream.
Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMediaForFragmentList
session.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
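A minimal sketch of the endpoint-then-fetch flow for GetMediaForFragmentList; the stream name and fragment number are placeholders, and fragment numbers would normally come from a prior ListFragments call:

```go
package main

import (
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideo"
	"github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

func main() {
	sess := session.Must(session.NewSession())

	ep, err := kinesisvideo.New(sess).GetDataEndpoint(&kinesisvideo.GetDataEndpointInput{
		StreamName: aws.String("my-stream"), // placeholder
		APIName:    aws.String("GET_MEDIA_FOR_FRAGMENT_LIST"),
	})
	if err != nil {
		panic(err)
	}

	svc := kinesisvideoarchivedmedia.New(sess, aws.NewConfig().WithEndpoint(*ep.DataEndpoint))
	out, err := svc.GetMediaForFragmentList(&kinesisvideoarchivedmedia.GetMediaForFragmentListInput{
		StreamName: aws.String("my-stream"),
		Fragments:  []*string{aws.String("91343852333181432392682062674373")}, // placeholder
	})
	if err != nil {
		panic(err)
	}
	defer out.Payload.Close()

	// Drain the stream; real code would parse the returned MKV data.
	n, _ := io.Copy(io.Discard, out.Payload)
	fmt.Println("read", n, "bytes")
}
```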
Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.
Both the StreamName
and the StreamARN
parameters are optional, but you must specify either the StreamName
or the StreamARN
when invoking this API operation.
An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:
The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC
(for AAC) or A_MS/ACM (for G.711).
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.
The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:
Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL
for the APIName
parameter.
Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL
. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL
returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).
Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you use with your AWS credentials.
The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.
The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback.
GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
Data retrieved with this action is billable. See Pricing for details.
For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits.
You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.
Both the StreamName
and the StreamARN
parameters are optional, but you must specify either the StreamName
or the StreamARN
when invoking this API operation.
An Amazon Kinesis video stream has the following requirements for providing data through HLS:
The media must contain h.264 or h.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEG/ISO/HEVC
(for h.265). Optionally, the codec ID of track 2 should be A_AAC
.
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).
Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.
The following procedure shows how to use HLS with Kinesis Video Streams:
Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL
for the APIName
parameter.
Retrieve the HLS URL using GetHLSStreamingSessionURL
. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL
returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).
Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.
The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.
The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist
action for each track, and additional metadata for the media player, including estimated bitrate and resolution.
GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment
action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment
actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode
is LIVE
or ON_DEMAND
. The HLS media playlist is typically static for sessions with a PlaybackType
of ON_DEMAND
. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType
of LIVE
. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.
GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
Data retrieved with this action is billable. See Pricing for details.
GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.
If the ContainerFormat
is MPEG_TS
, this API is used instead of GetMP4InitFragment
and GetMP4MediaFragment
to retrieve stream media.
Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.
A streaming session URL must not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.
You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.
You must first call the GetDataEndpoint
API to get an endpoint. Then send the GetMediaForFragmentList
requests to this endpoint using the --endpoint-url parameter.
For limits, see Kinesis Video Streams Limits.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Returns a list of Fragment objects from the specified stream and timestamp range within the archived data.
Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to ListFragments
. However, results are typically available in less than one second.
You must first call the GetDataEndpoint
API to get an endpoint. Then send the ListFragments
requests to this endpoint using the --endpoint-url parameter.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
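A minimal sketch of ListFragments over a recent producer-timestamp range; the stream name is a placeholder and NextToken paging is omitted for brevity:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideo"
	"github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

func main() {
	sess := session.Must(session.NewSession())

	ep, err := kinesisvideo.New(sess).GetDataEndpoint(&kinesisvideo.GetDataEndpointInput{
		StreamName: aws.String("my-stream"), // placeholder
		APIName:    aws.String("LIST_FRAGMENTS"),
	})
	if err != nil {
		panic(err)
	}

	svc := kinesisvideoarchivedmedia.New(sess, aws.NewConfig().WithEndpoint(*ep.DataEndpoint))
	end := time.Now()
	out, err := svc.ListFragments(&kinesisvideoarchivedmedia.ListFragmentsInput{
		StreamName: aws.String("my-stream"),
		MaxResults: aws.Int64(100),
		FragmentSelector: &kinesisvideoarchivedmedia.FragmentSelector{
			FragmentSelectorType: aws.String("PRODUCER_TIMESTAMP"),
			TimestampRange: &kinesisvideoarchivedmedia.TimestampRange{
				StartTimestamp: aws.Time(end.Add(-10 * time.Minute)),
				EndTimestamp:   aws.Time(end),
			},
		},
	})
	if err != nil {
		panic(err)
	}
	for _, f := range out.Fragments {
		fmt.Println(aws.StringValue(f.FragmentNumber),
			aws.Int64Value(f.FragmentLengthInMilliseconds), "ms")
	}
}
```

Because listing is eventually consistent, as noted above, a fragment acknowledged by the producer may take up to about a second to appear in the results.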
Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client calls. Try making the call later.
", + "base": "Kinesis Video Streams has throttled the request because you have exceeded a limit. Try making the call later. For information about limits, see Kinesis Video Streams Limits.
", "refs": { } }, @@ -27,7 +27,7 @@ } }, "ClipTimestampRange": { - "base": "The range of timestamps for which to return fragments.
The values in the ClipTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.
The range of timestamps for which to return fragments.
", "refs": { "ClipFragmentSelector$TimestampRange": "The range of timestamps to return.
" } @@ -69,10 +69,16 @@ "DASHFragmentSelector$FragmentSelectorType": "The source of the timestamps for the requested media.
When FragmentSelectorType
is set to PRODUCER_TIMESTAMP
and GetDASHStreamingSessionURLInput$PlaybackMode is ON_DEMAND
or LIVE_REPLAY
, the first fragment ingested with a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer timestamps within the TimestampRange
ingested immediately following the first fragment (up to the GetDASHStreamingSessionURLInput$MaxManifestFragmentResults value) are included.
Fragments that have duplicate producer timestamps are deduplicated. This means that if producers are producing a stream of fragments with producer timestamps that are approximately equal to the true clock time, the MPEG-DASH manifest will contain all of the fragments within the requested timestamp range. If some fragments are ingested within the same time range and very different points in time, only the oldest ingested collection of fragments are returned.
When FragmentSelectorType
is set to PRODUCER_TIMESTAMP
and GetDASHStreamingSessionURLInput$PlaybackMode is LIVE
, the producer timestamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server timestamps are included in the MPEG-DASH manifest. This means that even if fragments ingested in the past have producer timestamps with values now, they are not included in the HLS media playlist.
The default is SERVER_TIMESTAMP
.
The maximum number of fragments that are returned in the MPEG-DASH manifest.
When the PlaybackMode
is LIVE
, the most recent fragments are returned up to this value. When the PlaybackMode
is ON_DEMAND
, the oldest fragments are returned, up to this maximum number.
When there are a higher number of fragments available in a live MPEG-DASH manifest, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live MPEG-DASH manifest have a minimum of 3 fragments and a maximum of 10 fragments.
The default is 5 fragments if PlaybackMode
is LIVE
or LIVE_REPLAY
, and 1,000 if PlaybackMode
is ON_DEMAND
.
The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
" + } + }, "DASHPlaybackMode": { "base": null, "refs": { - "GetDASHStreamingSessionURLInput$PlaybackMode": "Whether to retrieve live, live replay, or archived, on-demand data.
Features of the three types of sessions include the following:
LIVE: For sessions of this type, the MPEG-DASH manifest is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new manifest on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.
In LIVE mode, the newest available fragments are included in an MPEG-DASH manifest, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the MPEG-DASH manifest if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the manifest, the older fragment is not added, and the gap is not filled.
LIVE_REPLAY: For sessions of this type, the MPEG-DASH manifest is updated similarly to how it is updated for LIVE mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the manifest every two seconds. This mode is useful to be able to start playback from when an event is detected and continue live streaming media that has not yet been ingested as of the time of the session creation. This mode is also useful to stream previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND mode.
ON_DEMAND: For sessions of this type, the MPEG-DASH manifest contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults. The manifest must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.
The default is LIVE.
Whether to retrieve live, live replay, or archived, on-demand data.
Features of the three types of sessions include the following:
LIVE: For sessions of this type, the MPEG-DASH manifest is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new manifest on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.
In LIVE mode, the newest available fragments are included in an MPEG-DASH manifest, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the MPEG-DASH manifest if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the manifest, the older fragment is not added, and the gap is not filled.
LIVE_REPLAY: For sessions of this type, the MPEG-DASH manifest is updated similarly to how it is updated for LIVE mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the manifest every two seconds. This mode is useful to be able to start playback from when an event is detected and continue live streaming media that has not yet been ingested as of the time of the session creation. This mode is also useful to stream previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND mode.
ON_DEMAND: For sessions of this type, the MPEG-DASH manifest contains all the fragments for the session, up to the number that is specified in MaxManifestFragmentResults. The manifest must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.
The default is LIVE.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType is LIVE.
The values in the DASHTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType is LIVE.
The values in the DASHTimestampRange are inclusive. Fragments that start exactly at or after the start time are included in the session. Fragments that start before the start time and continue past it are not included in the session.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType is LIVE.
The source of the timestamps for the requested media.
When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is ON_DEMAND or LIVE_REPLAY, the first fragment ingested with a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer timestamps within the TimestampRange ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value) are included.
Fragments that have duplicate producer timestamps are deduplicated. This means that if producers are producing a stream of fragments with producer timestamps that are approximately equal to the true clock time, the HLS media playlists will contain all of the fragments within the requested timestamp range. If some fragments are ingested within the same time range at very different points in time, only the oldest ingested collection of fragments are returned.
When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is LIVE, the producer timestamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server timestamps are included in the HLS media playlist. This means that even if fragments ingested in the past have producer timestamps with values close to the current time, they are not included in the HLS media playlist.
The default is SERVER_TIMESTAMP.
The maximum number of fragments that are returned in the HLS media playlists.
When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.
When a higher number of fragments is available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.
The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.
The maximum value of 5,000 fragments corresponds to more than 80 minutes of video on streams with 1-second fragments, and more than 13 hours of video on streams with 10-second fragments.
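For the HLS side, a minimal Go sketch (assumed usage, not from this release) of requesting a live playlist URL capped to the recommended 3-10 fragment window; the stream name is hypothetical and the GetDataEndpoint lookup is again omitted.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

func main() {
	svc := kinesisvideoarchivedmedia.New(session.Must(session.NewSession()))

	out, err := svc.GetHLSStreamingSessionURL(&kinesisvideoarchivedmedia.GetHLSStreamingSessionURLInput{
		StreamName:   aws.String("my-stream"), // hypothetical
		PlaybackMode: aws.String("LIVE"),
		// Keep a live playlist small: more fragments means more startup
		// buffering (higher latency), fewer means more rebuffering risk.
		MaxMediaPlaylistFragmentResults: aws.Int64(5),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.HLSStreamingSessionURL))
}
```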
" + } + }, "HLSPlaybackMode": { "base": null, "refs": { - "GetHLSStreamingSessionURLInput$PlaybackMode": "Whether to retrieve live, live replay, or archived, on-demand data.
Features of the three types of sessions include the following:
LIVE: For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.
In LIVE mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.
LIVE_REPLAY: For sessions of this type, the HLS media playlist is updated similarly to how it is updated for LIVE mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the media playlist every two seconds. This mode is useful to be able to start playback from when an event is detected and continue live streaming media that has not yet been ingested as of the time of the session creation. This mode is also useful to stream previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND mode.
ON_DEMAND: For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults. The playlist must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.
The default is LIVE.
Whether to retrieve live, live replay, or archived, on-demand data.
Features of the three types of sessions include the following:
LIVE: For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.
In LIVE mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.
LIVE_REPLAY: For sessions of this type, the HLS media playlist is updated similarly to how it is updated for LIVE mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the media playlist every two seconds. This mode is useful to be able to start playback from when an event is detected and continue live streaming media that has not yet been ingested as of the time of the session creation. This mode is also useful to stream previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND mode.
ON_DEMAND: For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults. The playlist must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the largest fragment number (that is, the newest fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.
The default is LIVE.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType is LIVE.
The values in the HLSTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType is LIVE.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType is LIVE.
The total number of fragments to return. If the total number of fragments available is more than the value specified in max-results, then a ListFragmentsOutput$NextToken is provided in the output that you can use to resume pagination.
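A minimal pagination sketch in Go (assumed usage, not from this release) of resuming ListFragments with NextToken; the stream name is a hypothetical placeholder.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

func main() {
	svc := kinesisvideoarchivedmedia.New(session.Must(session.NewSession()))

	input := &kinesisvideoarchivedmedia.ListFragmentsInput{
		StreamName: aws.String("my-stream"), // hypothetical
		MaxResults: aws.Int64(1000),
	}
	for {
		page, err := svc.ListFragments(input)
		if err != nil {
			panic(err)
		}
		for _, f := range page.Fragments {
			fmt.Println(aws.StringValue(f.FragmentNumber), aws.TimeValue(f.ProducerTimestamp))
		}
		if page.NextToken == nil {
			break // no more pages
		}
		input.NextToken = page.NextToken // resume where the last page ended
	}
}
```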
The maximum number of fragments that are returned in the MPEG-DASH manifest.
When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.
When a higher number of fragments is available in a live MPEG-DASH manifest, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live MPEG-DASH manifest have a minimum of 3 fragments and a maximum of 10 fragments.
The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.
The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults: The maximum number of fragments that are returned in the HLS media playlists.
When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.
When a higher number of fragments is available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.
The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.
The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
", - "ListFragmentsInput$MaxResults": "The total number of fragments to return. If the total number of fragments available is more than the value specified in max-results
, then a ListFragmentsOutput$NextToken is provided in the output that you can use to resume pagination.
The starting timestamp in the range of timestamps for which to return fragments.
This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past. This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
The start of the timestamp range for the requested media. If the DASHTimestampRange value is specified, the StartTimestamp value is required. This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past. The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode, then the session will continue to include newly ingested fragments until the session expires. This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
The starting timestamp in the range of timestamps for which to return fragments. Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past. This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
The start of the timestamp range for the requested media. If the DASHTimestampRange value is specified, the StartTimestamp value is required. Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past. The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode, then the session will continue to include newly ingested fragments until the session expires. This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
The timestamp from the producer corresponding to the fragment.
", "Fragment$ServerTimestamp": "The timestamp from the AWS server corresponding to the fragment.
", - "HLSTimestampRange$StartTimestamp": "The start of the timestamp range for the requested media.
If the HLSTimestampRange value is specified, the StartTimestamp value is required. This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past. The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode, then the session will continue to include newly ingested fragments until the session expires. This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
The start of the timestamp range for the requested media.
If the HLSTimestampRange value is specified, the StartTimestamp value is required. Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past. The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode, then the session will continue to include newly ingested fragments until the session expires. This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
The starting timestamp in the range of timestamps for which to return fragments.
", "TimestampRange$EndTimestamp": "The ending timestamp in the range of timestamps for which to return fragments.
" } diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index 3a67fa307b4..cd8a503623c 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -2330,6 +2330,7 @@ }, "LayerPermissionAllowedAction":{ "type":"string", + "max":22, "pattern":"lambda:GetLayerVersion" }, "LayerPermissionAllowedPrincipal":{ @@ -2806,6 +2807,7 @@ }, "OrganizationId":{ "type":"string", + "max":34, "pattern":"o-[a-z0-9]{10,32}" }, "PackageType":{ @@ -2844,7 +2846,7 @@ }, "Principal":{ "type":"string", - "pattern":".*" + "pattern":"[^\\s]+" }, "ProvisionedConcurrencyConfigList":{ "type":"list", @@ -3251,6 +3253,7 @@ }, "SourceOwner":{ "type":"string", + "max":12, "pattern":"\\d{12}" }, "State":{ diff --git a/models/apis/lambda/2015-03-31/docs-2.json b/models/apis/lambda/2015-03-31/docs-2.json index 98c63327ab5..cac7181680c 100644 --- a/models/apis/lambda/2015-03-31/docs-2.json +++ b/models/apis/lambda/2015-03-31/docs-2.json @@ -37,7 +37,7 @@ "ListCodeSigningConfigs": "Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems
parameter to return fewer configurations per call.
Lists event source mappings. Specify an EventSourceArn
to only show event source mappings for a single event source.
Retrieves a list of configurations for asynchronous invocation for a function. To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
- "ListFunctions": "Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call. Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version. To get more information about a function or version, use GetFunction."
+ "ListFunctions": "Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call. Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version. The ListFunctions action returns a subset of the FunctionConfiguration fields. To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason, LastUpdateStatusReasonCode) for a function or version, use GetFunction."
List the functions that use the specified code signing configuration. You can use this method prior to deleting a code signing configuration, to verify that no functions are using it.
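Given the 50-item page cap that this update documents, the SDK's paginator is the natural fit. A hedged Go sketch, assuming nothing beyond the standard ListFunctions API:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// Page through every function, including published versions. Each page
	// holds at most 50 items regardless of the MaxItems setting.
	err := svc.ListFunctionsPages(&lambda.ListFunctionsInput{
		FunctionVersion: aws.String("ALL"),
		MaxItems:        aws.Int64(50),
	}, func(page *lambda.ListFunctionsOutput, lastPage bool) bool {
		for _, fn := range page.Functions {
			fmt.Println(aws.StringValue(fn.FunctionName), aws.StringValue(fn.Version))
		}
		return true // keep paging
	})
	if err != nil {
		panic(err)
	}
}
```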
", "ListLayerVersions": "Lists the versions of an AWS Lambda layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime.
", "ListLayers": "Lists AWS Lambda layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime.
", @@ -1228,7 +1228,7 @@ "ListCodeSigningConfigsRequest$MaxItems": "Maximum number of items to return.
", "ListEventSourceMappingsRequest$MaxItems": "The maximum number of event source mappings to return.
", "ListFunctionsByCodeSigningConfigRequest$MaxItems": "Maximum number of items to return.
", - "ListFunctionsRequest$MaxItems": "The maximum number of functions to return.
", + "ListFunctionsRequest$MaxItems": "The maximum number of functions to return in the response. Note that ListFunctions
returns a maximum of 50 items in each response, even if you set the number higher.
The maximum number of versions to return.
" } }, diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index 0be3035e58d..ce03e3f4bf8 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -2,101 +2,101 @@ "version": "2.0", "service": "", "operations": { - "AbortMultipartUpload": "This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure that the parts list is empty.
For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to AbortMultipartUpload:
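The abort-then-verify flow described above maps to two SDK calls. A hedged Go sketch; bucket, key, and upload ID are hypothetical placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String("my-bucket"),
		Key:      aws.String("my-key"),
		UploadId: aws.String("example-upload-id"),
	})
	if err != nil {
		panic(err)
	}

	// Verify no parts remain, so no further part storage is billed.
	// In-flight part uploads can still land, which is why the docs say
	// an abort may need to be repeated.
	parts, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String("my-bucket"),
		Key:      aws.String("my-key"),
		UploadId: aws.String("example-upload-id"),
	})
	if err == nil && len(parts.Parts) == 0 {
		fmt.Println("upload aborted; parts list is empty")
	}
}
```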
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.
Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload.
For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
CompleteMultipartUpload has the following special errors:
Error code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
400 Bad Request
Error code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
400 Bad Request
Error code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
400 Bad Request
Error code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
404 Not Found
The following operations are related to CompleteMultipartUpload:
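The "ascending order by part number" requirement (source of the InvalidPartOrder error above) is easy to show in code. A minimal Go sketch, assuming the ETags were captured from earlier UploadPart responses; all names are hypothetical:

```go
package main

import (
	"sort"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// completeUpload assembles the parts list in ascending part-number order
// and completes the upload. etags maps part numbers to the ETag values
// returned by UploadPart.
func completeUpload(svc s3iface.S3API, bucket, key, uploadID string, etags map[int64]string) error {
	parts := make([]*s3.CompletedPart, 0, len(etags))
	for n, tag := range etags {
		parts = append(parts, &s3.CompletedPart{
			PartNumber: aws.Int64(n),
			ETag:       aws.String(tag),
		})
	}
	// Sort to satisfy the InvalidPartOrder constraint.
	sort.Slice(parts, func(i, j int) bool { return *parts[i].PartNumber < *parts[j].PartNumber })

	_, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
	})
	return err
}
```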
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.
The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.
Metadata
When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.
x-amz-copy-source-if Headers
To only copy an object under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters:
x-amz-copy-source-if-match
x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:
x-amz-copy-source-if-match condition evaluates to true
x-amz-copy-source-if-unmodified-since condition evaluates to false
If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:
x-amz-copy-source-if-none-match condition evaluates to false
x-amz-copy-source-if-modified-since condition evaluates to true
All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.
Server-side encryption
When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
Access Control List (ACL)-Specific Request Headers
When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
Storage Class Options
You can use the CopyObject operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
Versioning
By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.
The following operations are related to CopyObject:
For more information, see Copying Objects.
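The conditional headers and storage-class option above combine naturally in one request. A hedged Go sketch; the buckets, key, and ETag are hypothetical placeholders:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Copy only if the source still has the expected ETag, replace the
	// metadata, and move the copy to STANDARD_IA in the same request.
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:                      aws.String("dest-bucket"),
		Key:                         aws.String("dest-key"),
		CopySource:                  aws.String("source-bucket/source-key"),
		CopySourceIfMatch:           aws.String("\"9b2cf535f27731c974343645a3985328\""),
		CopySourceIfUnmodifiedSince: aws.Time(time.Now().Add(-24 * time.Hour)),
		StorageClass:                aws.String("STANDARD_IA"),
		MetadataDirective:           aws.String("REPLACE"),
		Metadata:                    map[string]*string{"reviewed": aws.String("true")},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.CopyObjectResult.ETag))
}
```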
", + "AbortMultipartUpload": "This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts action and ensure that the parts list is empty.
For information about permissions required to use the multipart upload, see Multipart Upload and Permissions.
The following operations are related to AbortMultipartUpload:
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.
Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions.
CompleteMultipartUpload has the following special errors:
Error code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
400 Bad Request
Error code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
400 Bad Request
Error code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
400 Bad Request
Error code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
404 Not Found
The following operations are related to CompleteMultipartUpload:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.
The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.
Metadata
When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.
x-amz-copy-source-if Headers
To only copy an object under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters:
x-amz-copy-source-if-match
x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:
x-amz-copy-source-if-match condition evaluates to true
x-amz-copy-source-if-unmodified-since condition evaluates to false
If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:
x-amz-copy-source-if-none-match condition evaluates to false
x-amz-copy-source-if-modified-since condition evaluates to true
All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.
Server-side encryption
When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
Access Control List (ACL)-Specific Request Headers
When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
Storage Class Options
You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
Versioning
By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.
The following operations are related to CopyObject:
", "CreateBucket": "Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Working with Amazon S3 buckets.
If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.
If you send your create bucket request to the s3.amazonaws.com
endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.
When creating a bucket using this operation, you can optionally specify the accounts or groups that should be granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the request headers.
Specify a canned ACL using the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly using the x-amz-grant-read
, x-amz-grant-write
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an AWS account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read
header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
The following operations are related to CreateBucket
:
This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
For more information about multipart uploads, see Multipart Upload Overview.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload.
You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt
, kms:Decrypt
, kms:ReEncrypt*
, kms:GenerateDataKey*
, and kms:DescribeKey
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.
If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.
For more information, see Protecting Data Using Server-Side Encryption.
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl
request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.
For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an AWS account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read
header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
The following operations are related to CreateMultipartUpload
:
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
For more information about multipart uploads, see Multipart Upload Overview.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
For information about the permissions required to use the multipart upload API, see Multipart Upload and Permissions.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.
To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.
If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.
For more information, see Protecting Data Using Server-Side Encryption.
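To ground the KMS discussion above, the following aws-sdk-go sketch initiates a multipart upload with SSE-KMS. It is a minimal sketch, assuming placeholder bucket, key, and KMS key ARN values, with credentials and Region taken from the environment; the caller must also hold the kms: permissions listed above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Request SSE-KMS for the new object. Omitting SSEKMSKeyId would make
	// Amazon S3 fall back to the AWS managed CMK, as described above.
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("examplebucket"), // placeholder
		Key:                  aws.String("photos/2006/February/sample.jpg"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// The upload ID ties the later UploadPart calls to the final
	// CompleteMultipartUpload or AbortMultipartUpload request.
	fmt.Println("upload ID:", aws.StringValue(out.UploadId))
}
```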
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.
For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting Data Using Server-Side Encryption with Customer-Provided Encryption Keys.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
emailAddress – if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id=\"111122223333\", id=\"444455556666\"
The following operations are related to CreateMultipartUpload:
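As a hedged sketch of the explicit-grant style described above, the GrantRead field of aws-sdk-go's CreateMultipartUploadInput maps to the x-amz-grant-read header; bucket, key, and account IDs below are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// GrantRead sends the x-amz-grant-read header shown above; use either
	// explicit grants like this or a canned ACL (the ACL field), not both.
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:    aws.String("examplebucket"), // placeholder
		Key:       aws.String("sample.jpg"),    // placeholder
		GrantRead: aws.String(`id="111122223333", id="444455556666"`), // placeholder account IDs
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("upload ID:", aws.StringValue(out.UploadId))
}
```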
Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.
Related Resources
", "DeleteBucketAnalyticsConfiguration": "Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to DeleteBucketAnalyticsConfiguration:
Deletes the cors configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.
For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
Related Resources:
", - "DeleteBucketEncryption": "This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Related Resources
", + "DeleteBucketCors": "Deletes the cors configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.
For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.
Related Resources:
", + "DeleteBucketEncryption": "This implementation of the DELETE action removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service User Guide.
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
Related Resources
", "DeleteBucketIntelligentTieringConfiguration": "Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to DeleteBucketIntelligentTieringConfiguration include:
Deletes an inventory configuration (identified by the inventory ID) from the bucket.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
Operations related to DeleteBucketInventoryConfiguration include:
Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration action. By default, the bucket owner has this permission and the bucket owner can grant this permission to others.
There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3 systems.
For more information about the object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", "DeleteBucketMetricsConfiguration": "Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to DeleteBucketMetricsConfiguration:
Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For information about Amazon S3 Object Ownership, see Using Object Ownership.
The following operations are related to DeleteBucketOwnershipControls:
This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following operations are related to DeleteBucketPolicy:
Deletes the replication configuration from the bucket.
To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
It can take a while for the deletion of a replication configuration to fully propagate.
For information about replication configuration, see Replication in the Amazon S3 Developer Guide.
The following operations are related to DeleteBucketReplication:
This implementation of the DELETE action uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following operations are related to DeleteBucketPolicy:
Deletes the replication configuration from the bucket.
To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
It can take a while for the deletion of a replication configuration to fully propagate.
For information about replication configuration, see Replication in the Amazon S3 Developer Guide.
The following operations are related to DeleteBucketReplication:
Deletes the tags from the bucket.
To use this operation, you must have permission to perform the s3:PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.
The following operations are related to DeleteBucketTagging:
This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.
This DELETE operation requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
The following operations are related to DeleteBucketWebsite:
Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.
To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.
For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.
You can delete objects by explicitly calling the DELETE Object API or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.
The following operation is related to DeleteObject:
This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.
This DELETE action requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
The following operations are related to DeleteBucketWebsite:
Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.
To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.
For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.
You can delete objects by explicitly calling DELETE Object or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.
The following action is related to DeleteObject:
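A minimal aws-sdk-go sketch of the versioned delete described above; bucket, key, and version ID are placeholders, and the MFA field (the x-amz-mfa header) would only be needed on an MFA Delete enabled bucket.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// With VersionId set, this permanently deletes that specific version
	// rather than inserting a delete marker.
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("examplebucket"),    // placeholder
		Key:       aws.String("sample.jpg"),       // placeholder
		VersionId: aws.String("exampleVersionId"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// DeleteMarker reports whether the deleted version was a delete marker.
	fmt.Println("delete marker:", aws.BoolValue(out.DeleteMarker))
}
```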
Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.
To use this operation, you must have permission to perform the s3:DeleteObjectTagging action.
To delete tags of a specific object version, add the versionId query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging action.
The following operations are related to DeleteObjectTagging:
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body.
When performing this operation on an MFA Delete enabled bucket, you must include an MFA token if the request attempts to delete any versioned objects. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.
Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
The following operations are related to DeleteObjects:
This action enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this action provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete action and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
The action supports two modes for the response: verbose and quiet. By default, the action uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete action encountered an error. For a successful deletion, the action does not return any information about the delete in the response body.
When performing this action on an MFA Delete enabled bucket, you must include an MFA token if the request attempts to delete any versioned objects. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.
Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
The following operations are related to DeleteObjects:
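A short aws-sdk-go sketch of the quiet mode described above, with placeholder bucket and keys; the SDK computes the required Content-MD5 header for this request itself.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Quiet mode: the response lists only the keys whose delete failed.
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("examplebucket"), // placeholder
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("objectkey1")}, // placeholders
				{Key: aws.String("objectkey2")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Errors {
		fmt.Printf("failed: %s (%s: %s)\n",
			aws.StringValue(e.Key), aws.StringValue(e.Code), aws.StringValue(e.Message))
	}
}
```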
Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to DeletePublicAccessBlock:
This implementation of the GET operation uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.
A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.
Related Resources
", - "GetBucketAcl": "This implementation of the GET
operation uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
Related Resources
", - "GetBucketAnalyticsConfiguration": "This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.
Related Resources
This implementation of the GET action uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.
A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service User Guide.
Related Resources
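A minimal aws-sdk-go sketch of reading the acceleration state discussed above, with a placeholder bucket name; note the unset-state behavior the doc calls out.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Status is unset when acceleration has never been configured on the
	// bucket, matching the note above; otherwise Enabled or Suspended.
	fmt.Println("acceleration status:", aws.StringValue(out.Status))
}
```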
", + "GetBucketAcl": "This implementation of the GET
action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
Related Resources
", + "GetBucketAnalyticsConfiguration": "This implementation of the GET action returns an analytics configuration (identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service User Guide.
Related Resources
Returns the cors configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.
For more information about cors, see Enabling Cross-Origin Resource Sharing.
The following operations are related to GetBucketCors:
Returns the default encryption configuration for an Amazon S3 bucket. If the bucket does not have a default encryption configuration, GetBucketEncryption returns ServerSideEncryptionConfigurationNotFoundError.
For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.
To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to GetBucketEncryption:
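A hedged aws-sdk-go sketch of handling the ServerSideEncryptionConfigurationNotFoundError case named above; the bucket name is a placeholder, and the error code is matched as a string taken from this doc text.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		// Error code string per the description above.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ServerSideEncryptionConfigurationNotFoundError" {
			fmt.Println("bucket has no default encryption configuration")
			return
		}
		log.Fatal(err)
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		fmt.Println("default SSE:", aws.StringValue(rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm))
	}
}
```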
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to GetBucketIntelligentTieringConfiguration include:
Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to GetBucketInventoryConfiguration:
For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycle has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycle:
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier API description, see GetBucketLifecycle.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycleConfiguration has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration:
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier action, see GetBucketLifecycle.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycleConfiguration has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration:
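A hedged aws-sdk-go sketch that reads the lifecycle configuration and handles the NoSuchLifecycleConfiguration special error listed above; the bucket name is a placeholder and the error code is matched as the string from the doc.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		// The 404 special error described above.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchLifecycleConfiguration" {
			fmt.Println("no lifecycle configuration set on this bucket")
			return
		}
		log.Fatal(err)
	}
	for _, rule := range out.Rules {
		fmt.Printf("rule %s: %s\n", aws.StringValue(rule.ID), aws.StringValue(rule.Status))
	}
}
```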
Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint request parameter in a CreateBucket request. For more information, see CreateBucket.
To use this implementation of the operation, you must be the bucket owner.
The following operations are related to GetBucketLocation:
Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.
The following operations are related to GetBucketLogging:
Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to GetBucketMetricsConfiguration:
No longer used, see GetBucketNotificationConfiguration.
", - "GetBucketNotificationConfiguration": "Returns the notification configuration of a bucket.
If notifications are not enabled on the bucket, the operation returns an empty NotificationConfiguration element.
By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification permission.
For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.
The following operation is related to GetBucketNotification:
Returns the notification configuration of a bucket.
If notifications are not enabled on the bucket, the action returns an empty NotificationConfiguration element.
By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification permission.
For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.
The following action is related to GetBucketNotification:
Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For information about Amazon S3 Object Ownership, see Using Object Ownership.
The following operations are related to GetBucketOwnershipControls:
Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following operation is related to GetBucketPolicy:
Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following action is related to GetBucketPolicy:
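A minimal aws-sdk-go sketch of fetching the policy document described above, with a placeholder bucket name; the calling identity is assumed to hold GetBucketPolicy permissions in the bucket owner's account.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// The policy is returned as a JSON document in string form.
	fmt.Println(aws.StringValue(out.Policy))
}
```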
Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".
The following operations are related to GetBucketPolicyStatus:
Returns the replication configuration of a bucket.
It can take a while to propagate the put or delete of a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.
For information about replication configuration, see Replication in the Amazon Simple Storage Service Developer Guide.
This operation requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.
For information about GetBucketReplication errors, see List of replication-related error codes.
The following operations are related to GetBucketReplication:
Returns the replication configuration of a bucket.
It can take a while to propagate the put or delete of a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.
For information about replication configuration, see Replication in the Amazon Simple Storage Service User Guide.
This action requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.
For information about GetBucketReplication errors, see List of replication-related error codes.
The following operations are related to GetBucketReplication:
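A hedged aws-sdk-go sketch of reading a replication configuration, echoing the Filter, DeleteMarkerReplication, and Priority round-trip noted above; the bucket name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Each rule echoes back the Priority and DeleteMarkerReplication
	// elements described above when a Filter is used.
	if out.ReplicationConfiguration != nil {
		for _, rule := range out.ReplicationConfiguration.Rules {
			fmt.Printf("rule %s: status=%s priority=%d\n",
				aws.StringValue(rule.ID), aws.StringValue(rule.Status), aws.Int64Value(rule.Priority))
		}
	}
}
```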
Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.
The following operations are related to GetBucketRequestPayment:
Returns the tag set associated with the bucket.
To use this operation, you must have permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging has the following special error:
Error code: NoSuchTagSetError
Description: There is no tag set associated with the bucket.
The following operations are related to GetBucketTagging:
Returns the versioning state of a bucket.
To retrieve the versioning state of a bucket, you must be the bucket owner.
This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is enabled, the bucket owner must use an authentication device to change the versioning state of the bucket.
The following operations are related to GetBucketVersioning:
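A minimal aws-sdk-go sketch of reading the versioning and MFA Delete state discussed above, with a placeholder bucket name.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Both fields are unset on a bucket that has never been configured.
	fmt.Println("versioning:", aws.StringValue(out.Status),
		"MFA delete:", aws.StringValue(out.MFADelete))
}
```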
Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
This GET operation requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.
The following operations are related to GetBucketWebsite:
Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.
Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you'll get an HTTP 400 Bad Request error.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.
Permissions
You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.
If you don't have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.
Versioning
By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.
For more information about versioning, see PutBucketVersioning.
Overriding Response Header Values
There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.
You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.
response-content-type
response-content-language
response-expires
response-cache-control
response-content-disposition
response-content-encoding
Additional Considerations about Request Headers
If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK and the data requested.
If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and If-Modified-Since condition evaluates to true, then S3 returns a 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
The following operations are related to GetObject:
Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
This GET action requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.
The following operations are related to GetBucketWebsite:
Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.
Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you'll get an HTTP 400 Bad Request error.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.
Permissions
You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.
If you don't have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.
Versioning
By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.
For more information about versioning, see PutBucketVersioning.
Overriding Response Header Values
There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.
You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.
response-content-type
response-content-language
response-expires
response-cache-control
response-content-disposition
response-content-encoding
Additional Considerations about Request Headers
If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK and the data requested.
If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and If-Modified-Since condition evaluates to true, then S3 returns a 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
The following operations are related to GetObject:
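A hedged aws-sdk-go sketch of a GET with a response-header override and an optional version, per the sections above; bucket, key, and version ID are placeholders, and the SDK signs the request, which the override parameters require.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("examplebucket"), // placeholder
		Key:    aws.String("photos/2006/February/sample.jpg"),
		// VersionId: aws.String("exampleVersionId"), // return a non-current version
		// Maps to the response-content-disposition query parameter above.
		ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	data, err := ioutil.ReadAll(out.Body)
	if err != nil {
		log.Fatal(err)
	}
	// TagCount surfaces the x-amz-tagging-count header mentioned above.
	fmt.Println(len(data), "bytes,", aws.Int64Value(out.TagCount), "tags")
}
```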
Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP access to the object.
This action is not supported by Amazon S3 on Outposts.
Versioning
By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.
The following operations are related to GetObjectAcl:
Gets an object's current Legal Hold status. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
", "GetObjectLockConfiguration": "Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
", "GetObjectRetention": "Retrieves an object's retention settings. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
", - "GetObjectTagging": "Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.
To use this operation, you must have permission to perform the s3:GetObjectTagging
action. By default, the GET operation returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging
action.
By default, the bucket owner has this permission and can grant this permission to others.
For information about the Amazon S3 object tagging feature, see Object Tagging.
The following operation is related to GetObjectTagging
:
Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.
You can get a torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.
To use GET, you must have READ access to the object.
This action is not supported by Amazon S3 on Outposts.
The following operation is related to GetObjectTorrent
:
Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.
To use this operation, you must have permission to perform the s3:GetObjectTagging
action. By default, the GET action returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging
action.
By default, the bucket owner has this permission and can grant this permission to others.
For information about the Amazon S3 object tagging feature, see Object Tagging.
The following action is related to GetObjectTagging
:
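As a concrete illustration of GetObjectTagging in this SDK, here is a minimal sketch; the bucket, key, and version ID are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket:    aws.String("example-bucket"),     // hypothetical
		Key:       aws.String("example-key"),        // hypothetical
		VersionId: aws.String("example-version-id"), // omit to read the current version's tags
	})
	if err != nil {
		panic(err)
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}
```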
Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.
You can get a torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.
To use GET, you must have READ access to the object.
This action is not supported by Amazon S3 on Outposts.
The following action is related to GetObjectTorrent
:
Retrieves the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or an object, it checks the PublicAccessBlock
configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock
settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
The following operations are related to GetPublicAccessBlock
:
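A minimal aws-sdk-go sketch of reading the PublicAccessBlock configuration for a hypothetical bucket and printing the four settings:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		panic(err)
	}
	cfg := out.PublicAccessBlockConfiguration
	fmt.Println("BlockPublicAcls:      ", aws.BoolValue(cfg.BlockPublicAcls))
	fmt.Println("IgnorePublicAcls:     ", aws.BoolValue(cfg.IgnorePublicAcls))
	fmt.Println("BlockPublicPolicy:    ", aws.BoolValue(cfg.BlockPublicPolicy))
	fmt.Println("RestrictPublicBuckets:", aws.BoolValue(cfg.RestrictPublicBuckets))
}
```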
This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 404 Not Found
or 403 Forbidden
code. A message body is not included, so you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic 404 Not Found
or 403 Forbidden
code. It is not possible to retrieve the exact exception beyond these error codes.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error.
The last modified property in this case is the creation date of the object.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
Consider the following when using request headers:
Consideration 1 – If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows:
If-Match
condition evaluates to true
, and;
If-Unmodified-Since
condition evaluates to false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows:
If-None-Match
condition evaluates to false
, and;
If-Modified-Since
condition evaluates to true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
Permissions
You need the s3:GetObject
permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.
The following operation is related to HeadObject
:
Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there will be a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to ListBucketAnalyticsConfigurations
:
This action is useful to determine if a bucket exists and you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 404 Not Found
or 403 Forbidden
code. A message body is not included, so you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
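Because only the status code distinguishes the failure modes, a Go caller typically inspects awserr.RequestFailure. A minimal sketch with a hypothetical bucket:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		// No body comes back, so only the status code distinguishes the cases.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			switch reqErr.StatusCode() {
			case 404:
				fmt.Println("bucket does not exist")
			case 403:
				fmt.Println("access denied")
			}
		}
		return
	}
	fmt.Println("bucket exists and is accessible")
}
```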
The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
A HEAD
request has the same options as a GET
action on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic 404 Not Found
or 403 Forbidden
code. It is not possible to retrieve the exact exception beyond these error codes.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error.
The last modified property in this case is the creation date of the object.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
Consider the following when using request headers:
Consideration 1 – If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows:
If-Match
condition evaluates to true
, and;
If-Unmodified-Since
condition evaluates to false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows:
If-None-Match
condition evaluates to false
, and;
If-Modified-Since
condition evaluates to true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
Permissions
You need the s3:GetObject
permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.
The following action is related to HeadObject
:
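A minimal aws-sdk-go HeadObject sketch (hypothetical bucket and key) that reads a few of the returned metadata fields:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
	})
	if err != nil {
		panic(err) // a 404 or 403 surfaces here with no body to inspect
	}
	fmt.Println("size:         ", aws.Int64Value(out.ContentLength))
	fmt.Println("content type: ", aws.StringValue(out.ContentType))
	fmt.Println("last modified:", aws.TimeValue(out.LastModified))
}
```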
Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there will be a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to ListBucketAnalyticsConfigurations
:
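A minimal pagination loop in aws-sdk-go, feeding NextContinuationToken back as the continuation-token until IsTruncated is false (hypothetical bucket):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	var token *string
	for {
		out, err := svc.ListBucketAnalyticsConfigurations(&s3.ListBucketAnalyticsConfigurationsInput{
			Bucket:            aws.String("example-bucket"), // hypothetical
			ContinuationToken: token,
		})
		if err != nil {
			panic(err)
		}
		for _, cfg := range out.AnalyticsConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		token = out.NextContinuationToken // at most 100 configurations per page
	}
}
```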
Lists the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers when access patterns change.
The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to ListBucketIntelligentTieringConfigurations
include:
Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to ListBucketInventoryConfigurations
:
Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token
in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to ListBucketMetricsConfigurations
:
Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to ListBucketInventoryConfigurations
:
Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token
in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to ListBucketMetricsConfigurations
:
Returns a list of all buckets owned by the authenticated sender of the request.
", - "ListMultipartUploads": "This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.
This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated
element with the value true. To list the additional multipart uploads, use the key-marker
and upload-id-marker
request parameters.
In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to ListMultipartUploads
:
This action lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.
This action returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated
element with the value true. To list the additional multipart uploads, use the key-marker
and upload-id-marker
request parameters.
In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.
The following operations are related to ListMultipartUploads
:
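A minimal aws-sdk-go sketch of ListMultipartUploads for a hypothetical bucket, noting the markers used to fetch further pages:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{
		Bucket:     aws.String("example-bucket"), // hypothetical
		MaxUploads: aws.Int64(100),
	})
	if err != nil {
		panic(err)
	}
	for _, u := range out.Uploads {
		fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId),
			aws.TimeValue(u.Initiated))
	}
	if aws.BoolValue(out.IsTruncated) {
		// Resume with KeyMarker: out.NextKeyMarker and
		// UploadIdMarker: out.NextUploadIdMarker in the next request.
	}
}
```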
Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.
A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.
To use this operation, you must have READ access to the bucket.
This action is not supported by Amazon S3 on Outposts.
The following operations are related to ListObjectVersions
:
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.
This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects
.
The following operations are related to ListObjects
:
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in ascending order of the respective key names in the list.
To use this operation, you must have READ access to the bucket.
To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.
To get a list of your buckets, see ListBuckets.
The following operations are related to ListObjectsV2
:
Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts
request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated
field with the value of true, and a NextPartNumberMarker
element. In subsequent ListParts
requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker
field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to ListParts
:
Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.
To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The Transfer Acceleration state of a bucket can be set to one of the following two values:
Enabled – Enables accelerated data transfers to the bucket.
Suspended – Disables accelerated data transfers to the bucket.
The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.
After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.
The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").
For more information about transfer acceleration, see Transfer Acceleration.
The following operations are related to PutBucketAccelerateConfiguration
:
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.
This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects
.
The following operations are related to ListObjects
:
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in ascending order of the respective key names in the list.
To use this operation, you must have READ access to the bucket.
To use this action in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
This section describes the latest revision of this action. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.
To get a list of your buckets, see ListBuckets.
The following operations are related to ListObjectsV2
:
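In the Go SDK, ListObjectsV2Pages follows the continuation token across pages of up to 1,000 keys for you. A minimal sketch with a hypothetical bucket and prefix:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Return false from the callback to stop paging early.
	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"), // hypothetical
		Prefix: aws.String("logs/"),          // hypothetical
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true
	})
	if err != nil {
		panic(err)
	}
}
```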
Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts
request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated
field with the value of true, and a NextPartNumberMarker
element. In subsequent ListParts
requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker
field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.
The following operations are related to ListParts
:
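A minimal aws-sdk-go ListParts sketch; the bucket, key, and upload ID are hypothetical, with the upload ID coming from CreateMultipartUpload:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),    // hypothetical
		Key:      aws.String("example-key"),       // hypothetical
		UploadId: aws.String("example-upload-id"), // from CreateMultipartUpload
	})
	if err != nil {
		panic(err)
	}
	for _, p := range out.Parts {
		fmt.Println(aws.Int64Value(p.PartNumber), aws.StringValue(p.ETag))
	}
	if aws.BoolValue(out.IsTruncated) {
		// Repeat with PartNumberMarker: out.NextPartNumberMarker.
	}
}
```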
Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.
To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The Transfer Acceleration state of a bucket can be set to one of the following two values:
Enabled – Enables accelerated data transfers to the bucket.
Suspended – Disables accelerated data transfers to the bucket.
The GetBucketAccelerateConfiguration action returns the transfer acceleration state of a bucket.
After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.
The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").
For more information about transfer acceleration, see Transfer Acceleration.
The following operations are related to PutBucketAccelerateConfiguration
:
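A minimal aws-sdk-go sketch that enables Transfer Acceleration on a hypothetical bucket:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The bucket name must be DNS-compliant and must not contain periods.
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled), // or ...Suspended
		},
	})
	if err != nil {
		panic(err)
	}
}
```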
Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP
permission.
You can use one of the following two ways to set a bucket's permissions:
Specify the ACL in the request body
Specify permissions using request headers
You cannot specify access permission using both the body and the request headers.
Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl
. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl
header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an AWS account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-write
header grants create, overwrite, and delete objects permission to the LogDelivery group predefined by Amazon S3 and to two AWS accounts identified by their canonical user IDs.
x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
Related Resources
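A minimal aws-sdk-go sketch of both styles on a hypothetical bucket; note that each PutBucketAcl request replaces the bucket's entire ACL, so explicit grants usually also re-grant the owner full control:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Either a canned ACL...
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		ACL:    aws.String(s3.BucketCannedACLPrivate),
	})
	if err != nil {
		panic(err)
	}

	// ...or explicit grant headers, but not both in one request.
	// A real call would typically include GrantFullControl for the owner too.
	_, err = svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket:     aws.String("example-bucket"),
		GrantWrite: aws.String(`uri="http://acs.amazonaws.com/groups/s3/LogDelivery"`),
	})
	if err != nil {
		panic(err)
	}
}
```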
", "PutBucketAnalyticsConfiguration": "Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.
You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport
request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.
You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
Special Errors
HTTP Error: HTTP 400 Bad Request
Code: InvalidArgument
Cause: Invalid argument.
HTTP Error: HTTP 400 Bad Request
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Error: HTTP 403 Forbidden
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket.
Related Resources
Sets the cors
configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS
action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com
to access your Amazon S3 bucket at my.example.bucket.com
by using the browser's XMLHttpRequest
capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
subresource to the bucket. The cors
subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors
configuration on the bucket and uses the first CORSRule
rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin
header must match AllowedOrigin
elements.
The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
header in case of a pre-flight OPTIONS
request must be one of the AllowedMethod
elements.
Every header specified in the Access-Control-Request-Headers
request header of a pre-flight request must match an AllowedHeader
element.
For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
Related Resources
", - "PutBucketEncryption": "This operation uses the encryption
subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service Developer Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Related Resources
", + "PutBucketCors": "Sets the cors
configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS
action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com
to access your Amazon S3 bucket at my.example.bucket.com
by using the browser's XMLHttpRequest
capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
subresource to the bucket. The cors
subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors
configuration on the bucket and uses the first CORSRule
rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin
header must match AllowedOrigin
elements.
The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
header in case of a pre-flight OPTIONS
request must be one of the AllowedMethod
elements.
Every header specified in the Access-Control-Request-Headers
request header of a pre-flight request must match an AllowedHeader
element.
For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.
Related Resources
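A minimal aws-sdk-go sketch that installs a single CORSRule matching the conditions above; the bucket name and origin are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Replaces any existing cors configuration on the bucket.
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: []*string{aws.String("http://www.example.com")},
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedHeaders: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		panic(err)
	}
}
```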
", + "PutBucketEncryption": "This action uses the encryption
subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
This action requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
Related Resources
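A minimal aws-sdk-go sketch that sets SSE-KMS default encryption with an S3 Bucket Key on a hypothetical bucket; the CMK ARN is hypothetical too:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAwsKms),
					// Hypothetical CMK; omit KMSMasterKeyID to use the AWS managed
					// key, or use ServerSideEncryptionAes256 for SSE-S3 instead.
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/example"),
				},
				BucketKeyEnabled: aws.Bool(true), // S3 Bucket Keys with SSE-KMS
			}},
		},
	})
	if err != nil {
		panic(err)
	}
}
```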
", "PutBucketIntelligentTieringConfiguration": "Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers when access patterns change.
The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to PutBucketIntelligentTieringConfiguration
include:
You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access or Deep Archive Access tier.
Special Errors
HTTP 400 Bad Request Error
Code: InvalidArgument
Cause: Invalid Argument
HTTP 400 Bad Request Error
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP 403 Forbidden Error
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration
bucket permission to set the configuration on the bucket.
This implementation of the PUT
operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.
When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.
You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Special Errors
HTTP 400 Bad Request Error
Code: InvalidArgument
Cause: Invalid Argument
HTTP 400 Bad Request Error
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP 403 Forbidden Error
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration
bucket permission to set the configuration on the bucket.
Related Resources
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
Related Resources
GetBucketLifecycle(Deprecated)
By default, a resource owner—in this case, a bucket owner, which is the AWS account that created the bucket—can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon Simple Storage Service Developer Guide:
This implementation of the PUT
action adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.
When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service User Guide.
You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
Special Errors
HTTP 400 Bad Request Error
Code: InvalidArgument
Cause: Invalid Argument
HTTP 400 Bad Request Error
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP 403 Forbidden Error
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration
bucket permission to set the configuration on the bucket.
Related Resources
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service User Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
Related Resources
GetBucketLifecycle(Deprecated)
By default, a resource owner—in this case, a bucket owner, which is the AWS account that created the bucket—can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon Simple Storage Service User Guide:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.
Rules
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:
Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.
Status whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
Permissions
By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
The following are related to PutBucketLifecycleConfiguration
:
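A minimal aws-sdk-go sketch of a prefix-filtered rule with one transition and an expiration; the bucket and rule name are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("archive-logs"), // hypothetical rule name
				Status: aws.String(s3.ExpirationStatusEnabled),
				// Filter by key name prefix; a Tag or an And combination also works.
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Transitions: []*s3.Transition{{
					Days:         aws.Int64(30),
					StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
				}},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})
	if err != nil {
		panic(err)
	}
}
```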
Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.
The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee
request element to grant access to other people. The Permissions
request element specifies the kind of access the grantee has to the logs.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request.
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
To enable logging, you use LoggingEnabled and its child request elements. To disable logging, you use an empty BucketLoggingStatus request element:
<BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />
For more information about server access logging, see Server Access Logging.
For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.
The following operations are related to PutBucketLogging
:
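A minimal aws-sdk-go sketch that enables access logging to a hypothetical target bucket in the same Region:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("example-bucket"), // hypothetical source bucket
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"), // hypothetical, same Region
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	if err != nil {
		panic(err)
	}
	// To disable logging, send an empty status instead:
	// BucketLoggingStatus: &s3.BucketLoggingStatus{}
}
```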
Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.
To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to PutBucketMetricsConfiguration:
PutBucketMetricsConfiguration has the following special error:
Error code: TooManyConfigurations
Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Status Code: HTTP 400 Bad Request
No longer used, see the PutBucketNotificationConfiguration operation.
", - "PutBucketNotificationConfiguration": "Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.
<NotificationConfiguration>
</NotificationConfiguration>
This operation replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with the s3:PutBucketNotification permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 will not add the configuration to your bucket.
Responses
If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.
The following operation is related to PutBucketNotificationConfiguration:
Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.
<NotificationConfiguration>
</NotificationConfiguration>
This action replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with the s3:PutBucketNotification permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.
Responses
If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.
The following action is related to PutBucketNotificationConfiguration:
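As an illustrative aws-sdk-go sketch (the bucket name and SNS topic ARN are hypothetical), a notification configuration with a single TopicConfiguration could be put like this:

_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
	Bucket: aws.String("my-bucket"),
	NotificationConfiguration: &s3.NotificationConfiguration{
		TopicConfigurations: []*s3.TopicConfiguration{{
			TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // hypothetical ARN
			Events:   []*string{aws.String("s3:ObjectCreated:*")},
		}},
	},
})

Passing an empty s3.NotificationConfiguration{} removes the notification configuration, matching the empty NotificationConfiguration element described above.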
Creates or modifies OwnershipControls
for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For information about Amazon S3 Object Ownership, see Using Object Ownership.
The following operations are related to PutBucketOwnershipControls:
Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following operations are related to PutBucketPolicy:
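A minimal aws-sdk-go sketch of attaching a bucket policy; the policy document and bucket name are hypothetical:

policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":"*",
  "Action":"s3:GetObject","Resource":"arn:aws:s3:::my-bucket/*"}]}`
_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
	Bucket: aws.String("my-bucket"),
	Policy: aws.String(policy), // the policy is sent as a JSON string
})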
Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.
To perform this operation, the user or role performing the operation must have the iam:PassRole permission.
Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.
A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.
To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.
For information about enabling versioning on a bucket, see Using Versioning.
By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.
Handling Replication of Encrypted Objects
By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.
For information on PutBucketReplication errors, see List of replication-related error codes.
The following operations are related to PutBucketReplication:
Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.
To perform this operation, the user or role performing the action must have the iam:PassRole permission.
Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.
A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.
To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.
For information about enabling versioning on a bucket, see Using Versioning.
By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.
Handling Replication of Encrypted Objects
By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.
For information on PutBucketReplication errors, see List of replication-related error codes.
The following operations are related to PutBucketReplication:
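An illustrative aws-sdk-go sketch of a single-rule replication configuration; the bucket names, role ARN, and prefix are hypothetical, and the IAM role must be assumable by Amazon S3:

_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
	Bucket: aws.String("source-bucket"),
	ReplicationConfiguration: &s3.ReplicationConfiguration{
		Role: aws.String("arn:aws:iam::123456789012:role/replication-role"), // hypothetical role ARN
		Rules: []*s3.ReplicationRule{{
			Status:                  aws.String("Enabled"),
			Priority:                aws.Int64(1),
			Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
			DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String("Disabled")},
			Destination:             &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
		}},
	},
})

Note how DeleteMarkerReplication, Status, and Priority accompany the Filter element, as required above.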
Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.
The following operations are related to PutBucketRequestPayment:
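A short aws-sdk-go sketch (hypothetical bucket name) that enables Requester Pays:

_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
	Bucket: aws.String("my-bucket"),
	RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
		Payer: aws.String("Requester"), // set back to "BucketOwner" to disable
	},
})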
Sets the tags for a bucket.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional operation is currently in progress against this resource. Please try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging:
Sets the tags for a bucket.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional action is currently in progress against this resource. Please try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging:
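An illustrative aws-sdk-go sketch of replacing a bucket's tag set; the bucket name and tag values are hypothetical, and note that this call overwrites the entire existing tag set:

_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
	Bucket: aws.String("my-bucket"),
	Tagging: &s3.Tagging{
		TagSet: []*s3.Tag{{Key: aws.String("application"), Value: aws.String("billing")}},
	},
})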
Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
Related Resources
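A hedged aws-sdk-go sketch that enables versioning on a hypothetical bucket; to suspend versioning instead, set Status to "Suspended":

_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
	Bucket: aws.String("my-bucket"),
	VersioningConfiguration: &s3.VersioningConfiguration{
		Status: aws.String("Enabled"),
	},
})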
", - "PutBucketWebsite": "Sets the configuration of the website that is specified in the website
subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service Developer Guide.
", - "PutObject": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.
To ensure that data is not corrupted traversing the network, use the Content-MD5
header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
The Content-MD5
header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.
Server-side Encryption
You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.
If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
Access Control List (ACL)-Specific Request Headers
You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
Storage Class Options
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
Versioning
If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.
For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.
Related Resources
", - "PutObjectAcl": "Uses the acl
subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP
permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon Simple Storage Service Developer Guide.
This action is not supported by Amazon S3 on Outposts.
Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
emailAddress – if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read header grants list objects permission to the two AWS accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
Versioning
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.
Related Resources
", + "PutBucketWebsite": "Sets the configuration of the website that is specified in the website
subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service User Guide.
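An illustrative aws-sdk-go sketch of the second style of website configuration, providing an index and error document; the bucket name and document keys are hypothetical:

_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
	Bucket: aws.String("my-bucket"),
	WebsiteConfiguration: &s3.WebsiteConfiguration{
		IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
		ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
	},
})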
", + "PutObject": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.
To ensure that data is not corrupted traversing the network, use the Content-MD5
header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
The Content-MD5
header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.
Server-side Encryption
You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.
If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
Access Control List (ACL)-Specific Request Headers
You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
Storage Class Options
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
Versioning
If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.
For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.
Related Resources
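A minimal aws-sdk-go sketch of uploading an object with SSE-S3 encryption; the bucket, key, and body are hypothetical, and the bytes package is assumed to be imported:

_, err := svc.PutObject(&s3.PutObjectInput{
	Bucket:               aws.String("my-bucket"),
	Key:                  aws.String("notes/hello.txt"),
	Body:                 bytes.NewReader([]byte("hello world")), // any io.ReadSeeker
	ServerSideEncryption: aws.String("AES256"),                   // optional SSE-S3
})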
", + "PutObjectAcl": "Uses the acl
subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP
permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon Simple Storage Service User Guide.
This action is not supported by Amazon S3 on Outposts.
Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id – if the value specified is the canonical user ID of an AWS account
uri – if you are granting permissions to a predefined group
emailAddress – if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read header grants list objects permission to the two AWS accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
Versioning
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.
Related Resources
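An illustrative aws-sdk-go sketch that applies a canned ACL to an existing object (bucket and key are hypothetical); a canned ACL and explicit x-amz-grant-* headers are mutually exclusive, as noted above:

_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
	Bucket: aws.String("my-bucket"),
	Key:    aws.String("notes/hello.txt"),
	ACL:    aws.String("public-read"), // canned ACL
})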
", "PutObjectLegalHold": "Applies a Legal Hold configuration to the specified object.
This action is not supported by Amazon S3 on Outposts.
Related Resources
", "PutObjectLockConfiguration": "Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.
DefaultRetention requires either Days or Years. You can't specify both at the same time.
Related Resources
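A hedged aws-sdk-go sketch of a default retention rule; the bucket name is hypothetical, and only Days or Years may be set, not both:

_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
	Bucket: aws.String("my-locked-bucket"),
	ObjectLockConfiguration: &s3.ObjectLockConfiguration{
		ObjectLockEnabled: aws.String("Enabled"),
		Rule: &s3.ObjectLockRule{
			DefaultRetention: &s3.DefaultRetention{
				Mode: aws.String("COMPLIANCE"),
				Days: aws.Int64(30),
			},
		},
	},
})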
", "PutObjectRetention": "Places an Object Retention configuration on an object.
This action is not supported by Amazon S3 on Outposts.
Related Resources
", - "PutObjectTagging": "Sets the supplied tag-set to an object that already exists in a bucket.
A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
Special Errors
Code: InvalidTagError
Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
Code: MalformedXMLError
Cause: The XML provided does not match the schema.
Code: OperationAbortedError
Cause: A conflicting conditional operation is currently in progress against this resource. Please try again.
Code: InternalError
Cause: The service was unable to apply the provided tag to the object.
Related Resources
", + "PutObjectTagging": "Sets the supplied tag-set to an object that already exists in a bucket.
A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You also need permission for the s3:PutObjectVersionTagging
action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
Special Errors
Code: InvalidTagError
Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
Code: MalformedXMLError
Cause: The XML provided does not match the schema.
Code: OperationAbortedError
Cause: A conflicting conditional action is currently in progress against this resource. Please try again.
Code: InternalError
Cause: The service was unable to apply the provided tag to the object.
Related Resources
", "PutPublicAccessBlock": "Creates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or an object, it checks the PublicAccessBlock
configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock
configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
Related Resources
Restores an archived copy of an object back into Amazon S3
This action is not supported by Amazon S3 on Outposts.
This action performs the following types of requests:
select
- Perform a select query on an archived object
restore an archive
- Restore an archived object
To use this operation, you must have permissions to perform the s3:RestoreObject
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Querying Archives with Select Requests
You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.
When making a select request, do the following:
Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.
For more information about the S3 structure in the request body, see the following:
Managing Access with ACLs in the Amazon Simple Storage Service Developer Guide
Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide
Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.
The following expression returns all records from the specified object.
SELECT * FROM Object
Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.
SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.
SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.
When making a select request, you can also do the following:
To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.
The following are additional important facts about the select feature:
The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, manually or through a lifecycle policy.
You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.
Restoring objects
Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:
Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide.
To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.
Responses
A successful operation returns either the 200 OK or 202 Accepted status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.
If the object is previously restored, Amazon S3 returns 200 OK in the response.
Special Errors
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
Related Resources
SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide
This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This action is not supported by Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.
For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.
Permissions
You must have s3:GetObject
permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.
Object Data Formats
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service Developer Guide.
For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.
Working with the Response Body
Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.
GetObject Support
The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.
Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon Simple Storage Service Developer Guide.
Special Errors
For a list of special errors for this operation, see List of SELECT Object Content Error Codes
Related Resources
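An illustrative aws-sdk-go sketch of a select request against a CSV object with headers; the bucket, key, and SQL expression are hypothetical, and the fmt package is assumed to be imported:

out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
	Bucket:              aws.String("my-bucket"),
	Key:                 aws.String("data/records.csv"),
	Expression:          aws.String("SELECT s.Id FROM S3Object s WHERE s.Amount > 100"),
	ExpressionType:      aws.String("SQL"),
	InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{FileHeaderInfo: aws.String("USE")}},
	OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
})
if err == nil {
	defer out.EventStream.Close()
	for ev := range out.EventStream.Events() { // chunked stream of messages
		if records, ok := ev.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
}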
", - "UploadPart": "Uploads a part in a multipart upload.
In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.
To ensure that data is not corrupted when traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.
If the upload request is signed with Signature Version 4, then Amazon S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).
Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop getting charged for storage of the uploaded parts. Only after you complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service Developer Guide.
For information on the permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.
You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.
Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Related Resources
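A hedged aws-sdk-go sketch of the initiate/upload/complete flow described above; the bucket, key, and partData (a hypothetical []byte of at least 5 MB for any part but the last) are illustrative, and error handling is elided:

create, _ := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
	Bucket: aws.String("my-bucket"),
	Key:    aws.String("big/archive.bin"),
})
part, _ := svc.UploadPart(&s3.UploadPartInput{
	Bucket:     aws.String("my-bucket"),
	Key:        aws.String("big/archive.bin"),
	UploadId:   create.UploadId, // returned by the initiate request
	PartNumber: aws.Int64(1),
	Body:       bytes.NewReader(partData),
})
_, _ = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
	Bucket:   aws.String("my-bucket"),
	Key:      aws.String("big/archive.bin"),
	UploadId: create.UploadId,
	MultipartUpload: &s3.CompletedMultipartUpload{
		Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
	},
})

Completing (or aborting) the upload is what stops the per-part storage charges noted above.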
", - "UploadPartCopy": "Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source
in your request and a byte range by adding the request header x-amz-copy-source-range in your request.
The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service Developer Guide.
Instead of using an existing object as part data, you might use the UploadPart operation and provide data in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
For more information about using the UploadPartCopy operation, see the following:
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon Simple Storage Service Developer Guide.
For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.
For information about copying objects using a single atomic operation vs. the multipart upload, see Operations on Objects in the Amazon Simple Storage Service Developer Guide.
For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:
Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request, the x-amz-copy-source-if-match condition evaluates to true, and the x-amz-copy-source-if-unmodified-since condition evaluates to false, then Amazon S3 returns 200 OK and copies the data.
Consideration 2 - If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request, the x-amz-copy-source-if-none-match condition evaluates to false, and the x-amz-copy-source-if-modified-since condition evaluates to true, then Amazon S3 returns a 412 Precondition Failed response code.
Versioning
If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.
You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:
x-amz-copy-source: /bucket/object?versionId=version id
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Code: InvalidRequest
Cause: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Related Resources
Restores an archived copy of an object back into Amazon S3
This action is not supported by Amazon S3 on Outposts.
This action performs the following types of requests:
select - Perform a select query on an archived object
restore an archive - Restore an archived object
To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
Querying Archives with Select Requests
You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service User Guide.
When making a select request, do the following:
Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service User Guide.
For more information about the S3 structure in the request body, see the following:
Managing Access with ACLs in the Amazon Simple Storage Service User Guide
Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service User Guide
Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.
The following expression returns all records from the specified object.
SELECT * FROM Object
Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.
SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.
SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service User Guide.
When making a select request, you can also do the following:
To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.
The following are additional important facts about the select feature:
The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, either manually or through a lifecycle policy.
You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.
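As a concrete illustration of the request shape described above, the following is a minimal sketch using aws-sdk-go. The bucket names, object key, output prefix, and query are placeholders for this example only, not values taken from this release.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Client setup; reused by the later fragments in this section.
	svc := s3.New(session.Must(session.NewSession()))

	// SELECT-type restore: query an archived, uncompressed CSV object and
	// write the results to a bucket in the same Region.
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("archive-bucket"),    // placeholder
		Key:    aws.String("archived-data.csv"), // placeholder
		RestoreRequest: &s3.RestoreRequest{
			Type: aws.String(s3.RestoreRequestTypeSelect),
			Tier: aws.String(s3.TierStandard),
			SelectParameters: &s3.SelectParameters{
				// Positional columns, since FileHeaderInfo is NONE below.
				Expression:     aws.String("SELECT s._1, s._2 FROM Object s WHERE s._3 > 100"),
				ExpressionType: aws.String(s3.ExpressionTypeSql),
				InputSerialization: &s3.InputSerialization{
					CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoNone)},
				},
				OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
			},
			// Query results are stored as new S3 objects under this location,
			// which must be in the same Region as the archive bucket.
			OutputLocation: &s3.OutputLocation{
				S3: &s3.S3Location{
					BucketName: aws.String("query-results-bucket"), // placeholder
					Prefix:     aws.String("select-output/"),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```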
Restoring objects
Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:
Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service User Guide.
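Assuming the same svc client as in the sketch above, a plain archive retrieval pairs a restoration period in days with one of these tiers; all names remain placeholders.

```go
// Restore a temporary copy of an archived object for 10 days using the
// Standard tier; TierExpedited and TierBulk are the other options.
_, err := svc.RestoreObject(&s3.RestoreObjectInput{
	Bucket: aws.String("archive-bucket"),  // placeholder
	Key:    aws.String("archived-object"), // placeholder
	RestoreRequest: &s3.RestoreRequest{
		Days: aws.Int64(10),
		GlacierJobParameters: &s3.GlacierJobParameters{
			Tier: aws.String(s3.TierStandard),
		},
	},
})
if err != nil {
	log.Fatal(err)
}
```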
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service User Guide.
To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service User Guide.
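A sketch of that status check with the same client (plus fmt from the standard library):

```go
// HeadObject surfaces the x-amz-restore header in the Restore field,
// e.g. `ongoing-request="true"` while the restore is still in progress.
head, err := svc.HeadObject(&s3.HeadObjectInput{
	Bucket: aws.String("archive-bucket"),  // placeholder
	Key:    aws.String("archived-object"), // placeholder
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("x-amz-restore:", aws.StringValue(head.Restore))
```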
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon Simple Storage Service User Guide.
Responses
A successful action returns either the 200 OK or 202 Accepted status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.
If the object is previously restored, Amazon S3 returns 200 OK in the response.
Special Errors
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
Related Resources
SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service User Guide
This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This action is not supported by Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service User Guide.
For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service User Guide.
Permissions
You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service User Guide.
Object Data Formats
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service User Guide.
For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service User Guide.
Working with the Response Body
Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.
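With aws-sdk-go, that message stream is exposed as an event stream on the output. The following fragment is a sketch (client setup as in the earlier restore example, plus os and log); the bucket, key, and query are placeholders.

```go
out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
	Bucket:         aws.String("my-bucket"), // placeholder
	Key:            aws.String("data.csv"),  // placeholder
	Expression:     aws.String("SELECT * FROM S3Object s"),
	ExpressionType: aws.String(s3.ExpressionTypeSql),
	InputSerialization: &s3.InputSerialization{
		CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
	},
	OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
})
if err != nil {
	log.Fatal(err)
}
defer out.EventStream.Close()

// Records events carry the query results; other event types report
// progress, stats, and completion.
for event := range out.EventStream.Events() {
	if records, ok := event.(*s3.RecordsEvent); ok {
		os.Stdout.Write(records.Payload)
	}
}
if err := out.EventStream.Err(); err != nil {
	log.Fatal(err)
}
```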
GetObject Support
The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.
Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon Simple Storage Service User Guide.
Special Errors
For a list of special errors for this operation, see List of SELECT Object Content Error Codes
Related Resources
", + "UploadPart": "Uploads a part in a multipart upload.
In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.
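A minimal sketch of one part upload with aws-sdk-go, assuming an *s3.S3 client svc as in the earlier sketches; the bucket, key, and upload ID are placeholders for values returned by your own CreateMultipartUpload call.

```go
// Every part except the last must be at least 5 MB.
part := bytes.Repeat([]byte("x"), 5*1024*1024)

out, err := svc.UploadPart(&s3.UploadPartInput{
	Bucket:     aws.String("my-bucket"),         // placeholder
	Key:        aws.String("my-object"),         // placeholder
	UploadId:   aws.String("example-upload-id"), // from CreateMultipartUpload
	PartNumber: aws.Int64(1),                    // 1-10,000; reusing a number overwrites that part
	Body:       bytes.NewReader(part),
})
if err != nil {
	log.Fatal(err)
}
// Keep the ETag with its part number for CompleteMultipartUpload.
fmt.Println("part 1 ETag:", aws.StringValue(out.ETag))
```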
To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.
If the upload request is signed with Signature Version 4, then Amazon S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).
Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop being charged for storage of the uploaded parts. Only after you complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service User Guide.
For information on the permissions required to use the multipart upload API, go to Multipart Upload and Permissions in the Amazon Simple Storage Service User Guide.
You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service User Guide.
Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
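In aws-sdk-go these three headers map onto input fields, as in the sketch below (continuing the fragment above). Passing the raw key and leaving the MD5 field unset, on the expectation that the SDK encodes the key and derives the MD5 header, is an assumption worth verifying against your SDK version.

```go
sseKey := string(make([]byte, 32)) // stand-in for your real AES-256 key

_, err = svc.UploadPart(&s3.UploadPartInput{
	Bucket:               aws.String("my-bucket"),         // placeholder
	Key:                  aws.String("my-object"),         // placeholder
	UploadId:             aws.String("example-upload-id"), // placeholder
	PartNumber:           aws.Int64(2),
	Body:                 bytes.NewReader(part),
	SSECustomerAlgorithm: aws.String("AES256"), // ...-customer-algorithm header
	SSECustomerKey:       aws.String(sseKey),   // ...-customer-key header
	// The ...-customer-key-MD5 header is derived by the SDK when unset.
})
```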
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Related Resources
", + "UploadPartCopy": "Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source
in your request and a byte range by adding the request header x-amz-copy-source-range
in your request.
The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service User Guide.
Instead of using an existing object as part data, you might use the UploadPart action and provide data in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
For more information about using the UploadPartCopy operation, see the following:
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon Simple Storage Service User Guide.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon Simple Storage Service User Guide.
For information about copying objects using a single atomic action vs. the multipart upload, see Operations on Objects in the Amazon Simple Storage Service User Guide.
For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
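A short sketch of the call with aws-sdk-go (client setup as earlier); the bucket names, key, upload ID, and byte range are placeholders.

```go
out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
	Bucket:          aws.String("dest-bucket"),                 // destination of the multipart upload
	Key:             aws.String("dest-object"),                 // placeholder
	UploadId:        aws.String("example-upload-id"),           // placeholder
	PartNumber:      aws.Int64(1),
	CopySource:      aws.String("source-bucket/source-object"), // x-amz-copy-source
	CopySourceRange: aws.String("bytes=0-5242879"),             // x-amz-copy-source-range (first 5 MiB)
})
if err != nil {
	log.Fatal(err)
}
// Keep the ETag with its part number for CompleteMultipartUpload.
fmt.Println(aws.StringValue(out.CopyPartResult.ETag))
```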
Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:
Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request, the x-amz-copy-source-if-match condition evaluates to true, and the x-amz-copy-source-if-unmodified-since condition evaluates to false, then Amazon S3 returns 200 OK and copies the data.
Consideration 2 - If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request, the x-amz-copy-source-if-none-match condition evaluates to false, and the x-amz-copy-source-if-modified-since condition evaluates to true, then Amazon S3 returns a 412 Precondition Failed response code.
Versioning
If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.
You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:
x-amz-copy-source: /bucket/object?versionId=version id
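The same subresource expressed through the SDK; the version ID below is a made-up placeholder.

```go
_, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
	Bucket:     aws.String("dest-bucket"),       // placeholder
	Key:        aws.String("dest-object"),       // placeholder
	UploadId:   aws.String("example-upload-id"), // placeholder
	PartNumber: aws.Int64(2),
	// Pin the copy source to one specific version of the object.
	CopySource: aws.String("source-bucket/source-object?versionId=example-version-id"),
})
```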
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Code: InvalidRequest
Cause: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Related Resources
Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", "CopyObjectOutput$BucketKeyEnabled": "Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", - "CopyObjectRequest$BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a COPY operation doesn’t affect bucket-level settings for S3 Bucket Key.
", + "CopyObjectRequest$BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.
", "CreateMultipartUploadOutput$BucketKeyEnabled": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", - "CreateMultipartUploadRequest$BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with an object operation doesn’t affect bucket-level settings for S3 Bucket Key.
", + "CreateMultipartUploadRequest$BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.
", "GetObjectOutput$BucketKeyEnabled": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", "HeadObjectOutput$BucketKeyEnabled": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", "PutObjectOutput$BucketKeyEnabled": "Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", - "PutObjectRequest$BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a PUT operation doesn’t affect bucket-level settings for S3 Bucket Key.
", - "ServerSideEncryptionRule$BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled
element to true
causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.
For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
", + "PutObjectRequest$BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.
", + "ServerSideEncryptionRule$BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled
element to true
causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.
For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
", "UploadPartCopyOutput$BucketKeyEnabled": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", "UploadPartOutput$BucketKeyEnabled": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
" } @@ -440,15 +440,15 @@ "BucketName": { "base": null, "refs": { - "AbortMultipartUploadRequest$Bucket": "The bucket name to which the upload was taking place.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "AbortMultipartUploadRequest$Bucket": "The bucket name to which the upload was taking place.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "AnalyticsS3BucketDestination$Bucket": "The Amazon Resource Name (ARN) of the bucket to which data is exported.
", "Bucket$Name": "The name of the bucket.
", - "CompleteMultipartUploadOutput$Bucket": "The name of the bucket that contains the newly created object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "CompleteMultipartUploadOutput$Bucket": "The name of the bucket that contains the newly created object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "CompleteMultipartUploadRequest$Bucket": "Name of the bucket to which the multipart upload was initiated.
", - "CopyObjectRequest$Bucket": "The name of the destination bucket.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "CopyObjectRequest$Bucket": "The name of the destination bucket.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "CreateBucketRequest$Bucket": "The name of the bucket to create.
", - "CreateMultipartUploadOutput$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "CreateMultipartUploadRequest$Bucket": "The name of the bucket to which to initiate the upload
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "CreateMultipartUploadOutput$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "CreateMultipartUploadRequest$Bucket": "The name of the bucket to which to initiate the upload
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "DeleteBucketAnalyticsConfigurationRequest$Bucket": "The name of the bucket from which an analytics configuration is deleted.
", "DeleteBucketCorsRequest$Bucket": "Specifies the bucket whose cors
configuration is being deleted.
The name of the bucket containing the server-side encryption configuration to delete.
", @@ -462,9 +462,9 @@ "DeleteBucketRequest$Bucket": "Specifies the bucket being deleted.
", "DeleteBucketTaggingRequest$Bucket": "The bucket that has the tag set to be removed.
", "DeleteBucketWebsiteRequest$Bucket": "The bucket name for which you want to remove the website configuration.
", - "DeleteObjectRequest$Bucket": "The bucket name of the bucket containing the object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "DeleteObjectTaggingRequest$Bucket": "The bucket name containing the objects from which to remove the tags.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "DeleteObjectsRequest$Bucket": "The bucket name containing the objects to delete.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "DeleteObjectRequest$Bucket": "The bucket name of the bucket containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "DeleteObjectTaggingRequest$Bucket": "The bucket name containing the objects from which to remove the tags.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "DeleteObjectsRequest$Bucket": "The bucket name containing the objects to delete.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "DeletePublicAccessBlockRequest$Bucket": "The Amazon S3 bucket whose PublicAccessBlock
configuration you want to delete.
The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store the results.
", "GetBucketAccelerateConfigurationRequest$Bucket": "The name of the bucket for which the accelerate configuration is retrieved.
", @@ -488,31 +488,31 @@ "GetBucketTaggingRequest$Bucket": "The name of the bucket for which to get the tagging information.
", "GetBucketVersioningRequest$Bucket": "The name of the bucket for which to get the versioning information.
", "GetBucketWebsiteRequest$Bucket": "The bucket name for which to get the website configuration.
", - "GetObjectAclRequest$Bucket": "The bucket name that contains the object for which to get the ACL information.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", - "GetObjectLegalHoldRequest$Bucket": "The bucket name containing the object whose Legal Hold status you want to retrieve.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", - "GetObjectLockConfigurationRequest$Bucket": "The bucket whose Object Lock configuration you want to retrieve.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", - "GetObjectRequest$Bucket": "The bucket name containing the object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "GetObjectRetentionRequest$Bucket": "The bucket name containing the object whose retention settings you want to retrieve.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", - "GetObjectTaggingRequest$Bucket": "The bucket name containing the object for which to get the tagging information.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "GetObjectAclRequest$Bucket": "The bucket name that contains the object for which to get the ACL information.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "GetObjectLegalHoldRequest$Bucket": "The bucket name containing the object whose Legal Hold status you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "GetObjectLockConfigurationRequest$Bucket": "The bucket whose Object Lock configuration you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "GetObjectRequest$Bucket": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "GetObjectRetentionRequest$Bucket": "The bucket name containing the object whose retention settings you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "GetObjectTaggingRequest$Bucket": "The bucket name containing the object for which to get the tagging information.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "GetObjectTorrentRequest$Bucket": "The name of the bucket containing the object for which to get the torrent files.
", "GetPublicAccessBlockRequest$Bucket": "The name of the Amazon S3 bucket whose PublicAccessBlock
configuration you want to retrieve.
The bucket name.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "HeadObjectRequest$Bucket": "The name of the bucket containing the object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "HeadBucketRequest$Bucket": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "HeadObjectRequest$Bucket": "The name of the bucket containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
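
The access point and Outposts addressing these doc strings describe is handled by the SDK whenever an ARN is passed as the Bucket parameter. A minimal aws-sdk-go sketch; the Region, access point ARN, and object key below are placeholder values, not anything from this release:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := s3.New(sess)

        // An access point ARN (or an S3 on Outposts access point ARN) is accepted
        // in place of the bucket name; the SDK resolves it to the
        // AccessPointName-AccountId.s3-accesspoint.Region hostname for you.
        out, err := svc.HeadObject(&s3.HeadObjectInput{
            Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"), // placeholder ARN
            Key:    aws.String("my-object-key"),                                                 // placeholder key
        })
        if err != nil {
            fmt.Println("HeadObject failed:", err)
            return
        }
        fmt.Println("content length:", aws.Int64Value(out.ContentLength))
    }
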
", "InventoryS3BucketDestination$Bucket": "The Amazon Resource Name (ARN) of the bucket where inventory results will be published.
", "ListBucketAnalyticsConfigurationsRequest$Bucket": "The name of the bucket from which analytics configurations are retrieved.
", "ListBucketIntelligentTieringConfigurationsRequest$Bucket": "The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.
", "ListBucketInventoryConfigurationsRequest$Bucket": "The name of the bucket containing the inventory configurations to retrieve.
", "ListBucketMetricsConfigurationsRequest$Bucket": "The name of the bucket containing the metrics configurations to retrieve.
", "ListMultipartUploadsOutput$Bucket": "The name of the bucket to which the multipart upload was initiated.
", - "ListMultipartUploadsRequest$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "ListMultipartUploadsRequest$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "ListObjectVersionsOutput$Name": "The bucket name.
", "ListObjectVersionsRequest$Bucket": "The bucket name that contains the objects.
", "ListObjectsOutput$Name": "The bucket name.
", - "ListObjectsRequest$Bucket": "The name of the bucket containing the objects.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "ListObjectsV2Output$Name": "The bucket name.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "ListObjectsV2Request$Bucket": "Bucket name to list.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "ListObjectsRequest$Bucket": "The name of the bucket containing the objects.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "ListObjectsV2Output$Name": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "ListObjectsV2Request$Bucket": "Bucket name to list.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
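
Because ListObjectsV2 returns at most 1,000 keys per response, callers normally drive it through the paginator. A sketch with a placeholder bucket name (an access point or Outposts ARN is accepted the same way, as described above):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
            Bucket: aws.String("my-bucket"), // placeholder; an ARN is also accepted
        }, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
            for _, obj := range page.Contents {
                fmt.Println(aws.StringValue(obj.Key))
            }
            return true // keep paginating until the final page
        })
        if err != nil {
            fmt.Println("listing failed:", err)
        }
    }
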
", "ListPartsOutput$Bucket": "The name of the bucket to which the multipart upload was initiated.
", - "ListPartsRequest$Bucket": "The name of the bucket to which the parts are being uploaded.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "ListPartsRequest$Bucket": "The name of the bucket to which the parts are being uploaded.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "PutBucketAccelerateConfigurationRequest$Bucket": "The name of the bucket for which the accelerate configuration is set.
", "PutBucketAclRequest$Bucket": "The bucket to which to apply the ACL.
", "PutBucketAnalyticsConfigurationRequest$Bucket": "The name of the bucket to which an analytics configuration is stored.
", @@ -533,18 +533,18 @@ "PutBucketTaggingRequest$Bucket": "The bucket name.
", "PutBucketVersioningRequest$Bucket": "The bucket name.
", "PutBucketWebsiteRequest$Bucket": "The bucket name.
", - "PutObjectAclRequest$Bucket": "The bucket name that contains the object to which you want to attach the ACL.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", - "PutObjectLegalHoldRequest$Bucket": "The bucket name containing the object that you want to place a Legal Hold on.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "PutObjectAclRequest$Bucket": "The bucket name that contains the object to which you want to attach the ACL.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "PutObjectLegalHoldRequest$Bucket": "The bucket name containing the object that you want to place a Legal Hold on.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", "PutObjectLockConfigurationRequest$Bucket": "The bucket whose Object Lock configuration you want to create or replace.
", - "PutObjectRequest$Bucket": "The bucket name to which the PUT operation was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "PutObjectRetentionRequest$Bucket": "The bucket name that contains the object you want to apply this Object Retention configuration to.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", - "PutObjectTaggingRequest$Bucket": "The bucket name containing the object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "PutObjectRequest$Bucket": "The bucket name to which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "PutObjectRetentionRequest$Bucket": "The bucket name that contains the object you want to apply this Object Retention configuration to.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "PutObjectTaggingRequest$Bucket": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "PutPublicAccessBlockRequest$Bucket": "The name of the Amazon S3 bucket whose PublicAccessBlock
configuration you want to set.
The bucket name containing the object to restore.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "RestoreObjectRequest$Bucket": "The bucket name containing the object to restore.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "S3Location$BucketName": "The name of the bucket where the restore results will be placed.
", "SelectObjectContentRequest$Bucket": "The S3 bucket.
", - "UploadPartCopyRequest$Bucket": "The bucket name.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", - "UploadPartRequest$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
" + "UploadPartCopyRequest$Bucket": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "UploadPartRequest$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
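
The UploadPart and ListParts fields above fit into the usual create, upload, complete sequence. A minimal single-part sketch with placeholder bucket, key, and payload (real uploads need every part except the last to be at least 5 MB):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        bucket, key := aws.String("my-bucket"), aws.String("big-object") // placeholders

        create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
        if err != nil {
            fmt.Println("create failed:", err)
            return
        }

        part, err := svc.UploadPart(&s3.UploadPartInput{
            Bucket:     bucket,
            Key:        key,
            UploadId:   create.UploadId,
            PartNumber: aws.Int64(1),
            Body:       bytes.NewReader([]byte("placeholder payload")),
        })
        if err != nil {
            fmt.Println("upload failed:", err)
            return
        }

        // Completing the upload requires the ETag returned for every part.
        _, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
            Bucket:   bucket,
            Key:      key,
            UploadId: create.UploadId,
            MultipartUpload: &s3.CompletedMultipartUpload{
                Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
            },
        })
        if err != nil {
            fmt.Println("complete failed:", err)
        }
    }
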
" } }, "BucketVersioningStatus": { @@ -565,7 +565,7 @@ "refs": { "DeleteObjectRequest$BypassGovernanceRetention": "Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation.
", "DeleteObjectsRequest$BypassGovernanceRetention": "Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. You must have sufficient permissions to perform this operation.
", - "PutObjectRetentionRequest$BypassGovernanceRetention": "Indicates whether this operation should bypass Governance-mode restrictions.
" + "PutObjectRetentionRequest$BypassGovernanceRetention": "Indicates whether this action should bypass Governance-mode restrictions.
" } }, "BytesProcessed": { @@ -592,7 +592,7 @@ "CORSConfiguration": { "base": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
", "refs": { - "PutBucketCorsRequest$CORSConfiguration": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
" + "PutBucketCorsRequest$CORSConfiguration": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.
" } }, "CORSRule": { @@ -651,7 +651,7 @@ "Code": { "base": null, "refs": { - "Error$Code": "The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.
Amazon S3 error codes
Code: AccessDenied
Description: Access Denied
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AccountProblem
Description: There is a problem with your AWS account that prevents the operation from completing successfully. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AllAccessDisabled
Description: All access to this Amazon S3 resource has been disabled. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AmbiguousGrantByEmailAddress
Description: The email address you provided is associated with more than one account.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: AuthorizationHeaderMalformed
Description: The authorization header you provided is invalid.
HTTP Status Code: 400 Bad Request
HTTP Status Code: N/A
Code: BadDigest
Description: The Content-MD5 you specified did not match what we received.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: BucketAlreadyExists
Description: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: BucketAlreadyOwnedByYou
Description: The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).
Code: 409 Conflict (in all Regions except the North Virginia Region)
SOAP Fault Code Prefix: Client
Code: BucketNotEmpty
Description: The bucket you tried to delete is not empty.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: CredentialsNotSupported
Description: This request does not support credentials.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: CrossLocationLoggingProhibited
Description: Cross-location logging not allowed. Buckets in one geographic location cannot log information to a bucket in another location.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: EntityTooLarge
Description: Your proposed upload exceeds the maximum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: ExpiredToken
Description: The provided token has expired.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IllegalVersioningConfigurationException
Description: Indicates that the versioning configuration specified in the request is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncompleteBody
Description: You did not provide the number of bytes specified by the Content-Length HTTP header
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncorrectNumberOfFilesInPostRequest
Description: POST requires exactly one file upload per request.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InlineDataTooLarge
Description: Inline data exceeds the maximum allowed size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InternalError
Description: We encountered an internal error. Please try again.
HTTP Status Code: 500 Internal Server Error
SOAP Fault Code Prefix: Server
Code: InvalidAccessKeyId
Description: The AWS access key ID you provided does not exist in our records.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidAddressingHeader
Description: You must specify the Anonymous role.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: InvalidArgument
Description: Invalid Argument
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketName
Description: The specified bucket is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketState
Description: The request is not valid with the current state of the bucket.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: InvalidDigest
Description: The Content-MD5 you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidEncryptionAlgorithmError
Description: The encryption request you specified is not valid. The valid value is AES256.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidLocationConstraint
Description: The specified location constraint is not valid. For more information about Regions, see How to Select a Region for Your Buckets.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidObjectState
Description: The operation is not valid for the current state of the object.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPartOrder
Description: The list of parts was not in ascending order. Parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPayer
Description: All access to this object has been disabled. Please contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPolicyDocument
Description: The content of the form does not meet the conditions specified in the policy document.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRange
Description: The requested range cannot be satisfied.
HTTP Status Code: 416 Requested Range Not Satisfiable
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Please use AWS4-HMAC-SHA256.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: SOAP requests must be made over an HTTPS connection.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with non-DNS compliant names.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with periods (.) in their names.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style requests.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is not configured on this bucket.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidSecurity
Description: The provided security credentials are not valid.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidSOAPRequest
Description: The SOAP request body is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidStorageClass
Description: The storage class you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidTargetBucketForLogging
Description: The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidToken
Description: The provided token is malformed or otherwise invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidURI
Description: Couldn't parse the specified URI.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: KeyTooLongError
Description: Your key is too long.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedACLError
Description: The XML you provided was not well-formed or did not validate against our published schema.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedPOSTRequest
Description: The body of your POST request is not well-formed multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedXML
Description: This happens when the user sends malformed XML (XML that doesn't conform to the published XSD) for the configuration. The error message is, \"The XML you provided was not well-formed or did not validate against our published schema.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxMessageLengthExceeded
Description: Your request was too big.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxPostPreDataLengthExceededError
Description: Your POST request fields preceding the upload file were too large.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MetadataTooLarge
Description: Your metadata headers exceed the maximum allowed metadata size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MethodNotAllowed
Description: The specified method is not allowed against this resource.
HTTP Status Code: 405 Method Not Allowed
SOAP Fault Code Prefix: Client
Code: MissingAttachment
Description: A SOAP attachment was expected, but none were found.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: MissingContentLength
Description: You must provide the Content-Length HTTP header.
HTTP Status Code: 411 Length Required
SOAP Fault Code Prefix: Client
Code: MissingRequestBodyError
Description: This happens when the user sends an empty XML document as a request. The error message is, \"Request body is empty.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityElement
Description: The SOAP 1.1 request is missing a security element.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityHeader
Description: Your request is missing a required header.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoLoggingStatusForKey
Description: There is no such thing as a logging status subresource for a key.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoSuchBucket
Description: The specified bucket does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchBucketPolicy
Description: The specified bucket does not have a bucket policy.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchKey
Description: The specified key does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchVersion
Description: Indicates that the version ID specified in the request does not match an existing version.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NotImplemented
Description: A header you provided implies functionality that is not implemented.
HTTP Status Code: 501 Not Implemented
SOAP Fault Code Prefix: Server
Code: NotSignedUp
Description: Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: OperationAborted
Description: A conflicting conditional operation is currently in progress against this resource. Try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: PermanentRedirect
Description: The bucket you are attempting to access must be addressed using the specified endpoint. Send all future requests to this endpoint.
HTTP Status Code: 301 Moved Permanently
SOAP Fault Code Prefix: Client
Code: PreconditionFailed
Description: At least one of the preconditions you specified did not hold.
HTTP Status Code: 412 Precondition Failed
SOAP Fault Code Prefix: Client
Code: Redirect
Description: Temporary redirect.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: RestoreAlreadyInProgress
Description: Object restore is already in progress.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: RequestIsNotMultiPartContent
Description: Bucket POST must be of the enclosure-type multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeout
Description: Your socket connection to the server was not read from or written to within the timeout period.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeTooSkewed
Description: The difference between the request time and the server's time is too large.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: RequestTorrentOfBucketError
Description: Requesting the torrent file of a bucket is not permitted.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: SignatureDoesNotMatch
Description: The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method. For more information, see REST Authentication and SOAP Authentication for details.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: ServiceUnavailable
Description: Reduce your request rate.
HTTP Status Code: 503 Service Unavailable
SOAP Fault Code Prefix: Server
Code: SlowDown
Description: Reduce your request rate.
HTTP Status Code: 503 Slow Down
SOAP Fault Code Prefix: Server
Code: TemporaryRedirect
Description: You are being redirected to the bucket while DNS updates.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: TokenRefreshRequired
Description: The provided token must be refreshed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: TooManyBuckets
Description: You have attempted to create more buckets than allowed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnexpectedContent
Description: This request does not support content.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnresolvableGrantByEmailAddress
Description: The email address you provided does not match any account on record.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UserKeyMustBeSpecified
Description: The bucket POST must contain the specified field name. If it is specified, check the order of the fields.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
", +    "Error$Code": "The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.
Amazon S3 error codes
Code: AccessDenied
Description: Access Denied
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AccountProblem
Description: There is a problem with your AWS account that prevents the action from completing successfully. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AllAccessDisabled
Description: All access to this Amazon S3 resource has been disabled. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AmbiguousGrantByEmailAddress
Description: The email address you provided is associated with more than one account.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: AuthorizationHeaderMalformed
Description: The authorization header you provided is invalid.
HTTP Status Code: 400 Bad Request
HTTP Status Code: N/A
Code: BadDigest
Description: The Content-MD5 you specified did not match what we received.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: BucketAlreadyExists
Description: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: BucketAlreadyOwnedByYou
Description: The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).
Code: 409 Conflict (in all Regions except the North Virginia Region)
SOAP Fault Code Prefix: Client
Code: BucketNotEmpty
Description: The bucket you tried to delete is not empty.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: CredentialsNotSupported
Description: This request does not support credentials.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: CrossLocationLoggingProhibited
Description: Cross-location logging not allowed. Buckets in one geographic location cannot log information to a bucket in another location.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: EntityTooLarge
Description: Your proposed upload exceeds the maximum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: ExpiredToken
Description: The provided token has expired.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IllegalVersioningConfigurationException
Description: Indicates that the versioning configuration specified in the request is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncompleteBody
Description: You did not provide the number of bytes specified by the Content-Length HTTP header
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncorrectNumberOfFilesInPostRequest
Description: POST requires exactly one file upload per request.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InlineDataTooLarge
Description: Inline data exceeds the maximum allowed size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InternalError
Description: We encountered an internal error. Please try again.
HTTP Status Code: 500 Internal Server Error
SOAP Fault Code Prefix: Server
Code: InvalidAccessKeyId
Description: The AWS access key ID you provided does not exist in our records.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidAddressingHeader
Description: You must specify the Anonymous role.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: InvalidArgument
Description: Invalid Argument
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketName
Description: The specified bucket is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketState
Description: The request is not valid with the current state of the bucket.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: InvalidDigest
Description: The Content-MD5 you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidEncryptionAlgorithmError
Description: The encryption request you specified is not valid. The valid value is AES256.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidLocationConstraint
Description: The specified location constraint is not valid. For more information about Regions, see How to Select a Region for Your Buckets.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidObjectState
Description: The action is not valid for the current state of the object.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPartOrder
Description: The list of parts was not in ascending order. Parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPayer
Description: All access to this object has been disabled. Please contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPolicyDocument
Description: The content of the form does not meet the conditions specified in the policy document.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRange
Description: The requested range cannot be satisfied.
HTTP Status Code: 416 Requested Range Not Satisfiable
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Please use AWS4-HMAC-SHA256.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: SOAP requests must be made over an HTTPS connection.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with non-DNS compliant names.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with periods (.) in their names.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style requests.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is not configured on this bucket.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidSecurity
Description: The provided security credentials are not valid.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidSOAPRequest
Description: The SOAP request body is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidStorageClass
Description: The storage class you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidTargetBucketForLogging
Description: The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidToken
Description: The provided token is malformed or otherwise invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidURI
Description: Couldn't parse the specified URI.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: KeyTooLongError
Description: Your key is too long.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedACLError
Description: The XML you provided was not well-formed or did not validate against our published schema.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedPOSTRequest
Description: The body of your POST request is not well-formed multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedXML
Description: This happens when the user sends malformed XML (XML that doesn't conform to the published XSD) for the configuration. The error message is, \"The XML you provided was not well-formed or did not validate against our published schema.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxMessageLengthExceeded
Description: Your request was too big.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxPostPreDataLengthExceededError
Description: Your POST request fields preceding the upload file were too large.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MetadataTooLarge
Description: Your metadata headers exceed the maximum allowed metadata size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MethodNotAllowed
Description: The specified method is not allowed against this resource.
HTTP Status Code: 405 Method Not Allowed
SOAP Fault Code Prefix: Client
Code: MissingAttachment
Description: A SOAP attachment was expected, but none were found.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: MissingContentLength
Description: You must provide the Content-Length HTTP header.
HTTP Status Code: 411 Length Required
SOAP Fault Code Prefix: Client
Code: MissingRequestBodyError
Description: This happens when the user sends an empty XML document as a request. The error message is, \"Request body is empty.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityElement
Description: The SOAP 1.1 request is missing a security element.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityHeader
Description: Your request is missing a required header.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoLoggingStatusForKey
Description: There is no such thing as a logging status subresource for a key.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoSuchBucket
Description: The specified bucket does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchBucketPolicy
Description: The specified bucket does not have a bucket policy.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchKey
Description: The specified key does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchVersion
Description: Indicates that the version ID specified in the request does not match an existing version.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NotImplemented
Description: A header you provided implies functionality that is not implemented.
HTTP Status Code: 501 Not Implemented
SOAP Fault Code Prefix: Server
Code: NotSignedUp
Description: Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: OperationAborted
Description: A conflicting conditional action is currently in progress against this resource. Try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: PermanentRedirect
Description: The bucket you are attempting to access must be addressed using the specified endpoint. Send all future requests to this endpoint.
HTTP Status Code: 301 Moved Permanently
SOAP Fault Code Prefix: Client
Code: PreconditionFailed
Description: At least one of the preconditions you specified did not hold.
HTTP Status Code: 412 Precondition Failed
SOAP Fault Code Prefix: Client
Code: Redirect
Description: Temporary redirect.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: RestoreAlreadyInProgress
Description: Object restore is already in progress.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: RequestIsNotMultiPartContent
Description: Bucket POST must be of the enclosure-type multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeout
Description: Your socket connection to the server was not read from or written to within the timeout period.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeTooSkewed
Description: The difference between the request time and the server's time is too large.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: RequestTorrentOfBucketError
Description: Requesting the torrent file of a bucket is not permitted.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: SignatureDoesNotMatch
Description: The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method. For more information, see REST Authentication and SOAP Authentication.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: ServiceUnavailable
Description: Reduce your request rate.
HTTP Status Code: 503 Service Unavailable
SOAP Fault Code Prefix: Server
Code: SlowDown
Description: Reduce your request rate.
HTTP Status Code: 503 Slow Down
SOAP Fault Code Prefix: Server
Code: TemporaryRedirect
Description: You are being redirected to the bucket while DNS updates.
HTTP Status Code: 307 Temporary Redirect
SOAP Fault Code Prefix: Client
Code: TokenRefreshRequired
Description: The provided token must be refreshed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: TooManyBuckets
Description: You have attempted to create more buckets than allowed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnexpectedContent
Description: This request does not support content.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnresolvableGrantByEmailAddress
Description: The email address you provided does not match any account on record.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UserKeyMustBeSpecified
Description: The bucket POST must contain the specified field name. If it is specified, check the order of the fields.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
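For reference, the codes in the table above reach aws-sdk-go callers through the awserr interfaces. A minimal sketch, assuming default credential and region configuration; the bucket and key names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// "examplebucket" and "missing-key" are placeholder names.
	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("missing-key"),
	})
	if err != nil {
		// Service error codes from the table above arrive as awserr.Error.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case s3.ErrCodeNoSuchBucket, s3.ErrCodeNoSuchKey:
				fmt.Println("not found:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
		}
		// The HTTP status column of the table is exposed on RequestFailure.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			fmt.Println("http status:", reqErr.StatusCode(), "request id:", reqErr.RequestID())
		}
	}
}
```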
Container for a failed delete operation that describes the object that Amazon S3 attempted to delete and the error it encountered.
" + "DeleteObjectsOutput$Errors": "Container for a failed delete action that describes the object that Amazon S3 attempted to delete and the error it encountered.
" } }, "Event": { @@ -1252,12 +1252,12 @@ "LambdaFunctionConfiguration$Events": "The Amazon S3 bucket event for which to invoke the AWS Lambda function. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.
", "QueueConfiguration$Events": "A collection of bucket events for which to send notifications
", "QueueConfigurationDeprecated$Events": "A collection of bucket events for which to send notifications
", - "TopicConfiguration$Events": "The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.
", + "TopicConfiguration$Events": "The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service User Guide.
", "TopicConfigurationDeprecated$Events": "A collection of events related to objects
" } }, "ExistingObjectReplication": { - "base": "Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.
", + "base": "Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.
", "refs": { "ReplicationRule$ExistingObjectReplication": "" } @@ -2276,11 +2276,11 @@ "base": null, "refs": { "ListObjectVersionsOutput$MaxKeys": "Specifies the maximum number of objects to return.
", - "ListObjectVersionsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.
", + "ListObjectVersionsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.
", "ListObjectsOutput$MaxKeys": "The maximum number of keys returned in the response body.
", - "ListObjectsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
", - "ListObjectsV2Output$MaxKeys": "Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
", - "ListObjectsV2Request$MaxKeys": "Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
" + "ListObjectsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
", + "ListObjectsV2Output$MaxKeys": "Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
", + "ListObjectsV2Request$MaxKeys": "Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
" } }, "MaxParts": { @@ -2538,7 +2538,7 @@ } }, "ObjectAlreadyInActiveTierError": { - "base": "This operation is not allowed against this storage tier.
", + "base": "This action is not allowed against this storage tier.
", "refs": { } }, @@ -2592,12 +2592,12 @@ "Object$Key": "The name that you assign to an object. You use the object key to retrieve the object.
", "ObjectIdentifier$Key": "Key name of the object.
Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
The object key.
", - "PutObjectAclRequest$Key": "Key for which the PUT operation was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "PutObjectAclRequest$Key": "Key for which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
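In the Go SDK, providing the ARN in place of the bucket name means passing it in the input struct's Bucket field. A sketch, assuming an SDK version with access point ARN support; the ARN, account ID, and object key are placeholders:

```go
_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
	// Access point ARN in place of the bucket name; the SDK routes the
	// request to the access point hostname described above.
	Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
	Key:    aws.String("exampleobject"),
	ACL:    aws.String(s3.ObjectCannedACLPublicRead),
})
```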
", "PutObjectLegalHoldRequest$Key": "The key name for the object that you want to place a Legal Hold on.
", - "PutObjectRequest$Key": "Object key for which the PUT operation was initiated.
", + "PutObjectRequest$Key": "Object key for which the PUT action was initiated.
", "PutObjectRetentionRequest$Key": "The key name for the object that you want to apply this Object Retention configuration to.
", "PutObjectTaggingRequest$Key": "Name of the object key.
", - "RestoreObjectRequest$Key": "Object key for which the operation was initiated.
", + "RestoreObjectRequest$Key": "Object key for which the action was initiated.
", "SelectObjectContentRequest$Key": "The object key.
", "Tag$Key": "Name of the object key.
", "UploadPartCopyRequest$Key": "Object key for which the multipart upload was initiated.
", @@ -2696,7 +2696,7 @@ } }, "ObjectNotInActiveTierError": { - "base": "The source object of the COPY operation is not in the active tier and is only stored in Amazon S3 Glacier.
", + "base": "The source object of the COPY action is not in the active tier and is only stored in Amazon S3 Glacier.
", "refs": { } }, @@ -2939,7 +2939,7 @@ } }, "PublicAccessBlockConfiguration": { - "base": "The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.
", + "base": "The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service User Guide.
", "refs": { "GetPublicAccessBlockOutput$PublicAccessBlockConfiguration": "The PublicAccessBlock
configuration currently in effect for this Amazon S3 bucket.
The PublicAccessBlock
configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.
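A sketch of applying such a configuration with all four options enabled; the bucket name is a placeholder:

```go
_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
	Bucket: aws.String("examplebucket"), // placeholder
	PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
		BlockPublicAcls:       aws.Bool(true),
		BlockPublicPolicy:     aws.Bool(true),
		IgnorePublicAcls:      aws.Bool(true),
		RestrictPublicBuckets: aws.Bool(true),
	},
})
```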
Provides information about object restoration operation and expiration time of the restored object copy.
", + "GetObjectOutput$Restore": "Provides information about object restoration action and expiration time of the restored object copy.
", "HeadObjectOutput$Restore": "If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see RestoreObject or an archive copy is already restored.
If an archive copy is already restored, the header value indicates when Amazon S3 is scheduled to delete the object copy. For example:
x-amz-restore: ongoing-request=\"false\", expiry-date=\"Fri, 23 Dec 2012 00:00:00 GMT\"
If the object restoration is in progress, the header returns the value ongoing-request=\"true\".
For more information about archiving objects, see Transitioning Objects: General Considerations.
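The raw x-amz-restore value is exposed on the output struct. Continuing the earlier sketch; bucket and key names are placeholders:

```go
head, err := svc.HeadObject(&s3.HeadObjectInput{
	Bucket: aws.String("examplebucket"),   // placeholder
	Key:    aws.String("archived-object"), // placeholder
})
if err == nil && head.Restore != nil {
	// e.g. ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 GMT"
	fmt.Println("restore status:", aws.StringValue(head.Restore))
}
```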
" } }, @@ -3432,7 +3432,7 @@ } }, "RoutingRule": { - "base": "Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon Simple Storage Service Developer Guide.
", + "base": "Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon Simple Storage Service User Guide.
", "refs": { "RoutingRules$member": null } @@ -3546,7 +3546,7 @@ "CopyObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.
", "CopyObjectRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.
", "CreateMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.
", - "CreateMultipartUploadRequest$SSEKMSKeyId": "Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.
", + "CreateMultipartUploadRequest$SSEKMSKeyId": "Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.
", "Encryption$KMSKeyId": "If the encryption type is aws:kms
, this optional value specifies the ID of the symmetric customer managed AWS KMS CMK to use for encryption of job results. Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.
If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.
", "HeadObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.
", @@ -3870,9 +3870,9 @@ } }, "Transition": { - "base": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.
", + "base": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.
", "refs": { - "Rule$Transition": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.
", + "Rule$Transition": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.
", "TransitionList$member": null } }, diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index 1720486a42d..176209dfaee 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -257,10 +257,8 @@ "DeleteObject": [ { "input": { - "Bucket": "examplebucket", - "Key": "objectkey.jpg" - }, - "output": { + "Bucket": "ExampleBucket", + "Key": "HappyFace.jpg" }, "comments": { "input": { @@ -268,14 +266,16 @@ "output": { } }, - "description": "The following example deletes an object from an S3 bucket.", - "id": "to-delete-an-object-1472850136595", - "title": "To delete an object" + "description": "The following example deletes an object from a non-versioned bucket.", + "id": "to-delete-an-object-from-a-non-versioned-bucket-1481588533089", + "title": "To delete an object (from a non-versioned bucket)" }, { "input": { - "Bucket": "ExampleBucket", - "Key": "HappyFace.jpg" + "Bucket": "examplebucket", + "Key": "objectkey.jpg" + }, + "output": { }, "comments": { "input": { @@ -283,9 +283,9 @@ "output": { } }, - "description": "The following example deletes an object from a non-versioned bucket.", - "id": "to-delete-an-object-from-a-non-versioned-bucket-1481588533089", - "title": "To delete an object (from a non-versioned bucket)" + "description": "The following example deletes an object from an S3 bucket.", + "id": "to-delete-an-object-1472850136595", + "title": "To delete an object" } ], "DeleteObjectTagging": [ @@ -334,10 +334,12 @@ "Delete": { "Objects": [ { - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" }, { - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" } ], "Quiet": false @@ -346,14 +348,12 @@ "output": { "Deleted": [ { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" }, { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" } ] }, @@ -363,9 +363,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", - "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", - "title": "To delete multiple objects from a versioned bucket" + "description": "The following example deletes objects from a bucket. The request specifies object versions. 
S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", + "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", + "title": "To delete multiple object versions from a versioned bucket" }, { "input": { @@ -373,12 +373,10 @@ "Delete": { "Objects": [ { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "Key": "objectkey2" } ], "Quiet": false @@ -387,12 +385,14 @@ "output": { "Deleted": [ { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", + "Key": "objectkey2" } ] }, @@ -402,9 +402,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", - "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", - "title": "To delete multiple object versions from a versioned bucket" + "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", + "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", + "title": "To delete multiple objects from a versioned bucket" } ], "GetBucketCors": [ @@ -840,17 +840,20 @@ { "input": { "Bucket": "examplebucket", - "Key": "exampleobject", - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "Key": "HappyFace.jpg" }, "output": { "TagSet": [ { - "Key": "Key1", - "Value": "Value1" + "Key": "Key4", + "Value": "Value4" + }, + { + "Key": "Key3", + "Value": "Value3" } ], - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "VersionId": "null" }, "comments": { "input": { @@ -858,27 +861,24 @@ "output": { } }, - "description": "The following example retrieves tag set of an object. The request specifies object version.", - "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663", - "title": "To retrieve tag set of a specific object version" + "description": "The following example retrieves tag set of an object.", + "id": "to-retrieve-tag-set-of-an-object-1481833847896", + "title": "To retrieve tag set of an object" }, { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "exampleobject", + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "output": { "TagSet": [ { - "Key": "Key4", - "Value": "Value4" - }, - { - "Key": "Key3", - "Value": "Value3" + "Key": "Key1", + "Value": "Value1" } ], - "VersionId": "null" + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "comments": { "input": { @@ -886,9 +886,9 @@ "output": { } }, - "description": "The following example retrieves tag set of an object.", - "id": "to-retrieve-tag-set-of-an-object-1481833847896", - "title": "To retrieve tag set of an object" + "description": "The following example retrieves tag set of an object. 
The request specifies object version.", + "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663", + "title": "To retrieve tag set of a specific object version" } ], "GetObjectTorrent": [ @@ -989,47 +989,37 @@ "ListMultipartUploads": [ { "input": { - "Bucket": "examplebucket", - "KeyMarker": "nextkeyfrompreviousresponse", - "MaxUploads": "2", - "UploadIdMarker": "valuefrompreviousresponse" + "Bucket": "examplebucket" }, "output": { - "Bucket": "acl1", - "IsTruncated": true, - "KeyMarker": "", - "MaxUploads": "2", - "NextKeyMarker": "someobjectkey", - "NextUploadIdMarker": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", - "UploadIdMarker": "", "Uploads": [ { "Initiated": "2014-05-01T05:40:58.000Z", "Initiator": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "mohanataws", - "ID": "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" + "DisplayName": "display-name", + "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" + "UploadId": "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" }, { "Initiated": "2014-05-01T05:41:27.000Z", "Initiator": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" + "UploadId": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" } ] }, @@ -1039,43 +1029,53 @@ "output": { } }, - "description": "The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next setup of multipart uploads.", - "id": "list-next-set-of-multipart-uploads-when-previous-result-is-truncated-1482428106748", - "title": "List next set of multipart uploads when previous result is truncated" + "description": "The following example lists in-progress multipart uploads on a specific bucket.", + "id": "to-list-in-progress-multipart-uploads-on-a-bucket-1481852775260", + "title": "To list in-progress multipart uploads on a bucket" }, { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "KeyMarker": "nextkeyfrompreviousresponse", + "MaxUploads": "2", + "UploadIdMarker": "valuefrompreviousresponse" }, "output": { + "Bucket": "acl1", + "IsTruncated": true, + "KeyMarker": "", + "MaxUploads": "2", + "NextKeyMarker": "someobjectkey", + "NextUploadIdMarker": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", + "UploadIdMarker": "", "Uploads": [ { "Initiated": "2014-05-01T05:40:58.000Z", "Initiator": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "display-name", - "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" + "DisplayName": "mohanataws", + "ID": 
"852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" + "UploadId": "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" }, { "Initiated": "2014-05-01T05:41:27.000Z", "Initiator": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" + "UploadId": "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" } ] }, @@ -1085,9 +1085,9 @@ "output": { } }, - "description": "The following example lists in-progress multipart uploads on a specific bucket.", - "id": "to-list-in-progress-multipart-uploads-on-a-bucket-1481852775260", - "title": "To list in-progress multipart uploads on a bucket" + "description": "The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next setup of multipart uploads.", + "id": "list-next-set-of-multipart-uploads-when-previous-result-is-truncated-1482428106748", + "title": "List next set of multipart uploads when previous result is truncated" } ], "ListObjectVersions": [ @@ -1567,17 +1567,13 @@ "PutObject": [ { "input": { - "Body": "filetoupload", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", - "Metadata": { - "metadata1": "value1", - "metadata2": "value2" - } + "Key": "HappyFace.jpg" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" + "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" }, "comments": { "input": { @@ -1585,9 +1581,9 @@ "output": { } }, - "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", - "title": "To upload object and specify user-defined metadata" + "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", + "id": "to-upload-an-object-1481760101010", + "title": "To upload an object" }, { "input": { @@ -1616,11 +1612,14 @@ "input": { "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "HappyFace.jpg", + "ServerSideEncryption": "AES256", + "StorageClass": "STANDARD_IA" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" + "ServerSideEncryption": "AES256", + "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" }, "comments": { "input": { @@ -1628,9 +1627,9 @@ "output": { } }, - "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", - "id": "to-upload-an-object-1481760101010", - "title": "To upload an object" + "description": "The following example uploads an object. 
The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", + "id": "to-upload-an-object-(specify-optional-headers)", + "title": "To upload an object (specify optional headers)" }, { "input": { @@ -1696,16 +1695,17 @@ }, { "input": { - "Body": "HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "ServerSideEncryption": "AES256", - "StorageClass": "STANDARD_IA" + "Key": "exampleobject", + "Metadata": { + "metadata1": "value1", + "metadata2": "value2" + } }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256", - "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" + "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" }, "comments": { "input": { @@ -1713,9 +1713,9 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", - "id": "to-upload-an-object-(specify-optional-headers)", - "title": "To upload an object (specify optional headers)" + "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", + "title": "To upload object and specify user-defined metadata" } ], "PutObjectAcl": [ @@ -1826,14 +1826,15 @@ "input": { "Bucket": "examplebucket", "CopySource": "/bucketname/sourceobjectkey", + "CopySourceRange": "bytes=1-100000", "Key": "examplelargeobject", - "PartNumber": "1", + "PartNumber": "2", "UploadId": "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--" }, "output": { "CopyPartResult": { - "ETag": "\"b0c6f0e7e054ab8fa2536a2677f8734d\"", - "LastModified": "2016-12-29T21:24:43.000Z" + "ETag": "\"65d16d19e65a7508a51f043180edcc36\"", + "LastModified": "2016-12-29T21:44:28.000Z" } }, "comments": { @@ -1842,23 +1843,22 @@ "output": { } }, - "description": "The following example uploads a part of a multipart upload by copying data from an existing object as data source.", - "id": "to-upload-a-part-by-copying-data-from-an-existing-object-as-data-source-1483046746348", - "title": "To upload a part by copying data from an existing object as data source" + "description": "The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as data source.", + "id": "to-upload-a-part-by-copying-byte-range-from-an-existing-object-as-data-source-1483048068594", + "title": "To upload a part by copying byte range from an existing object as data source" }, { "input": { "Bucket": "examplebucket", "CopySource": "/bucketname/sourceobjectkey", - "CopySourceRange": "bytes=1-100000", "Key": "examplelargeobject", - "PartNumber": "2", + "PartNumber": "1", "UploadId": "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--" }, "output": { "CopyPartResult": { - "ETag": "\"65d16d19e65a7508a51f043180edcc36\"", - "LastModified": "2016-12-29T21:44:28.000Z" + "ETag": "\"b0c6f0e7e054ab8fa2536a2677f8734d\"", + "LastModified": "2016-12-29T21:24:43.000Z" } }, "comments": { @@ -1867,9 +1867,9 @@ "output": { } }, - "description": "The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as data source.", - "id": 
"to-upload-a-part-by-copying-byte-range-from-an-existing-object-as-data-source-1483048068594", - "title": "To upload a part by copying byte range from an existing object as data source" + "description": "The following example uploads a part of a multipart upload by copying data from an existing object as data source.", + "id": "to-upload-a-part-by-copying-data-from-an-existing-object-as-data-source-1483046746348", + "title": "To upload a part by copying data from an existing object as data source" } ] } diff --git a/models/apis/s3control/2018-08-20/api-2.json b/models/apis/s3control/2018-08-20/api-2.json index e228fd3f651..9400c5d32e8 100755 --- a/models/apis/s3control/2018-08-20/api-2.json +++ b/models/apis/s3control/2018-08-20/api-2.json @@ -2770,12 +2770,12 @@ "type":"string", "max":1024, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*)$" }, "TagValueString":{ "type":"string", "max":1024, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*)$" }, "Tagging":{ "type":"structure", diff --git a/models/apis/s3control/2018-08-20/docs-2.json b/models/apis/s3control/2018-08-20/docs-2.json index c843df7b3ea..120d0b0d0d5 100755 --- a/models/apis/s3control/2018-08-20/docs-2.json +++ b/models/apis/s3control/2018-08-20/docs-2.json @@ -1,46 +1,46 @@ { "version": "2.0", - "service": "AWS S3 Control provides access to Amazon S3 control plane operations.
", + "service": "AWS S3 Control provides access to Amazon S3 control plane actions.
", "operations": { - "CreateAccessPoint": "Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.
Using this action with Amazon S3 on Outposts
This action:
Requires a virtual private cloud (VPC) configuration as S3 on Outposts only supports VPC style access points.
Does not support ACL on S3 on Outposts buckets.
Does not support Public Access on S3 on Outposts buckets.
Does not support object lock for S3 on Outposts buckets.
For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to CreateAccessPoint
:
This API operation creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.
Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.
S3 on Outposts buckets do not support:
ACLs. Instead, configure access point policies to manage access to buckets.
Public access.
Object Lock.
Bucket Location constraint.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id
in your API request, see the Examples section.
The following actions are related to CreateBucket
for Amazon S3 on Outposts:
You can use S3 Batch Operations to perform large-scale batch operations on Amazon S3 objects. Batch Operations can run a single operation on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
This operation creates an S3 Batch Operations job.
Related actions include:
", + "CreateAccessPoint": "Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service User Guide.
Using this action with Amazon S3 on Outposts
This action:
Requires a virtual private cloud (VPC) configuration as S3 on Outposts only supports VPC style access points.
Does not support ACL on S3 on Outposts buckets.
Does not support Public Access on S3 on Outposts buckets.
Does not support object lock for S3 on Outposts buckets.
For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to CreateAccessPoint
:
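A sketch with the SDK's s3control client; the account ID, access point name, and bucket are placeholders, and an S3 on Outposts call would additionally require a VpcConfiguration, per the notes above:

```go
ctl := s3control.New(sess) // import "github.com/aws/aws-sdk-go/service/s3control"
_, err := ctl.CreateAccessPoint(&s3control.CreateAccessPointInput{
	AccountId: aws.String("123456789012"), // placeholder
	Name:      aws.String("example-ap"),
	Bucket:    aws.String("examplebucket"),
})
```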
This action creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.
Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.
Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.
S3 on Outposts buckets support:
Tags
LifecycleConfigurations for deleting expired objects
For a list of Amazon S3 features not supported by Amazon S3 on Outposts, see Unsupported Amazon S3 features.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id
in your API request, see the Examples section.
The following actions are related to CreateBucket
for Amazon S3 on Outposts:
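The x-amz-outpost-id parameter mentioned above is surfaced by the Go SDK as the OutpostId input field. A sketch reusing the s3control client from the previous example; the bucket name and outpost ID are placeholders:

```go
_, err := ctl.CreateBucket(&s3control.CreateBucketInput{
	Bucket:    aws.String("examplebucket"),
	OutpostId: aws.String("op-01ac5d28a6a232904"), // sent as the x-amz-outpost-id header
})
```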
You can use S3 Batch Operations to perform large-scale batch actions on Amazon S3 objects. Batch Operations can run a single action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
This action creates an S3 Batch Operations job.
Related actions include:
", "DeleteAccessPoint": "Deletes the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteAccessPoint
:
Deletes the access point policy for the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteAccessPointPolicy
:
This API operation deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.
Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
Related Resources
", - "DeleteBucketLifecycleConfiguration": "This API action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.
Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration
action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
For more information about object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", - "DeleteBucketPolicy": "This API operation deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.
This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy
permissions on the specified Outposts bucket and belong to the bucket owner's account to use this operation. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteBucketPolicy
:
This operation deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.
Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permission to perform the PutBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteBucketTagging
:
Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging
action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "DeleteBucket": "This action deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.
Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
Related Resources
", + "DeleteBucketLifecycleConfiguration": "This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.
Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration
action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
For more information about object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", + "DeleteBucketPolicy": "This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.
This implementation of the DELETE action uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy
permissions on the specified Outposts bucket and belong to the bucket owner's account to use this action. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteBucketPolicy
:
This action deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.
Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the PutBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteBucketTagging
:
Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging
action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.
Related actions include:
", "DeletePublicAccessBlock": "Removes the PublicAccessBlock
configuration for an AWS account. For more information, see Using Amazon S3 block public access.
Related actions include:
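A sketch, reusing the s3control client from the earlier example; the account-level PublicAccessBlock configuration is keyed only by account ID (a placeholder here):

```go
_, err := ctl.DeletePublicAccessBlock(&s3control.DeletePublicAccessBlockInput{
	AccountId: aws.String("123456789012"), // placeholder
})
```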
", - "DeleteStorageLensConfiguration": "Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "DeleteStorageLensConfiguration": "Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Related actions include:
", "GetAccessPoint": "Returns configuration information about the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetAccessPoint
:
Returns the access point policy associated with the specified access point.
The following actions are related to GetAccessPointPolicy
:
Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.
", - "GetBucket": "Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.
If you don't have s3-outposts:GetBucket
permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied
error.
The following actions are related to GetBucket
for Amazon S3 on Outposts:
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
This operation gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.
Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permission to perform the s3-outposts:GetLifecycleConfiguration
action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following actions are related to GetBucketLifecycleConfiguration
:
This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.
Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy
permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetBucketPolicy
:
This operation gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see GetBucketTagging in the Amazon Simple Storage Service API.
Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permission to perform the GetBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging
has the following special error:
Error code: NoSuchTagSetError
Description: There is no tag set associated with the bucket.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetBucketTagging
:
Returns the tags on an S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging
action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "GetBucket": "Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket
permissions on the specified bucket and belong to the bucket owner's account in order to use this action. Only users from the Outposts bucket owner's account with the right permissions can perform actions on an Outposts bucket.
If you don't have s3-outposts:GetBucket
permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied
error.
The following actions are related to GetBucket
for Amazon S3 on Outposts:
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
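A sketch of the GetBucket call for an Outposts bucket, reusing the imports and svc client from the GetAccessPoint example above; the Outposts bucket ARN and account ID are placeholders, not values from this diff.
func describeOutpostsBucket(svc *s3control.S3Control, accountID, bucketARN string) error {
	// For S3 on Outposts, Bucket takes the bucket's ARN; the SDK can derive
	// the outpost-specific endpoint addressing from the ARN.
	out, err := svc.GetBucket(&s3control.GetBucketInput{
		AccountId: aws.String(accountID), // must be the bucket owner's account
		Bucket:    aws.String(bucketARN),
	})
	if err != nil {
		return err // 403 Access Denied without s3-outposts:GetBucket permissions
	}
	fmt.Println("bucket:", aws.StringValue(out.Bucket))
	return nil
}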
This action gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.
Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts, and for information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3-outposts:GetLifecycleConfiguration
action. The Outposts bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following actions are related to GetBucketLifecycleConfiguration
:
This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.
Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this action.
Only users from the Outposts bucket owner's account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy
permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetBucketPolicy
:
This action gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket's tags, see GetBucketTagging in the Amazon Simple Storage Service API.
Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the GetBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging
has the following special error:
Error code: NoSuchTagSetError
Description: There is no tag set associated with the bucket.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetBucketTagging
:
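Sketch of reading an Outposts bucket's tag set with GetBucketTagging, reusing the svc client from the first example; the ARN is a placeholder, and the error path illustrates the NoSuchTagSetError case called out above.
func printOutpostsBucketTags(svc *s3control.S3Control, accountID, bucketARN string) error {
	out, err := svc.GetBucketTagging(&s3control.GetBucketTaggingInput{
		AccountId: aws.String(accountID),
		Bucket:    aws.String(bucketARN),
	})
	if err != nil {
		// Surfaces NoSuchTagSetError when no tag set is associated with the bucket.
		return err
	}
	for _, t := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
	return nil
}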
Returns the tags on an S3 Batch Operations job. To use this action, you must have permission to perform the s3:GetJobTagging
action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.
Related actions include:
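A corresponding sketch for GetJobTagging on a Batch Operations job (same client pattern as above; the job ID is a placeholder).
func batchJobTags(svc *s3control.S3Control, accountID, jobID string) ([]*s3control.S3Tag, error) {
	// Requires the s3:GetJobTagging permission on the calling identity.
	out, err := svc.GetJobTagging(&s3control.GetJobTaggingInput{
		AccountId: aws.String(accountID),
		JobId:     aws.String(jobID),
	})
	if err != nil {
		return nil, err
	}
	return out.Tags, nil
}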
", "GetPublicAccessBlock": "Retrieves the PublicAccessBlock
configuration for an AWS account. For more information, see Using Amazon S3 block public access.
Related actions include:
", - "GetStorageLensConfiguration": "Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:GetStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:GetStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Gets the tags of the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Returns a list of the access points currently associated with the specified bucket. You can retrieve up to 1,000 access points per call. If the specified bucket has more than 1,000 access points (or the number specified in maxResults
, whichever is less), the response will include a continuation token that you can use to list the additional access points.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to ListAccessPoints
:
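Sketch of walking the full access point list with the generated pagination helper, which follows the continuation token described above; this assumes the SDK's ListAccessPointsPages helper and the AccessPointList output member, with placeholder identifiers.
func printAllAccessPoints(svc *s3control.S3Control, accountID, bucket string) error {
	in := &s3control.ListAccessPointsInput{
		AccountId:  aws.String(accountID),
		Bucket:     aws.String(bucket),
		MaxResults: aws.Int64(100), // page size; the service caps a single call at 1,000
	}
	return svc.ListAccessPointsPages(in, func(page *s3control.ListAccessPointsOutput, lastPage bool) bool {
		for _, ap := range page.AccessPointList {
			fmt.Println(aws.StringValue(ap.Name))
		}
		return true // keep paging until the continuation token is exhausted
	})
}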
Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", - "ListRegionalBuckets": "Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id
in your request, see the Examples section.
Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:ListStorageLensConfigurations
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Related actions include:
", + "ListRegionalBuckets": "Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id
in your request, see the Examples section.
Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:ListStorageLensConfigurations
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutAccessPointPolicy
:
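A sketch of PutAccessPointPolicy, which, per the note above, replaces any policy already attached to the access point; policyJSON is an ordinary IAM-style policy document supplied by the caller.
func putAccessPointPolicy(svc *s3control.S3Control, accountID, name, policyJSON string) error {
	// Each access point holds at most one policy; this call overwrites it.
	_, err := svc.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{
		AccountId: aws.String(accountID),
		Name:      aws.String(name),
		Policy:    aws.String(policyJSON),
	})
	return err
}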
This action puts a lifecycle configuration to an Amazon S3 on Outposts bucket. To put a lifecycle configuration to an S3 bucket, see PutBucketLifecycleConfiguration in the Amazon Simple Storage Service API.
Creates a new lifecycle configuration for the Outposts bucket or replaces an existing lifecycle configuration. Outposts buckets only support lifecycle configurations that delete/expire objects after a certain period of time and abort incomplete multipart uploads. For more information, see Managing Lifecycle Permissions for Amazon S3 on Outposts.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketLifecycleConfiguration
:
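Sketch of an Outposts lifecycle configuration exercising the two rule types Outposts buckets support (expiration and multipart-upload cleanup); rule ID, day counts, and the ARN are placeholder assumptions.
func putOutpostsLifecycle(svc *s3control.S3Control, accountID, bucketARN string) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3control.PutBucketLifecycleConfigurationInput{
		AccountId: aws.String(accountID),
		Bucket:    aws.String(bucketARN),
		LifecycleConfiguration: &s3control.LifecycleConfiguration{
			Rules: []*s3control.LifecycleRule{{
				ID:     aws.String("expire-and-clean-up"),
				Status: aws.String("Enabled"),
				// Outposts rules can expire objects after a set time...
				Expiration: &s3control.LifecycleExpiration{Days: aws.Int64(30)},
				// ...and abort incomplete multipart uploads.
				AbortIncompleteMultipartUpload: &s3control.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	return err
}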
This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.
Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy
permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketPolicy
:
This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.
Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this operation, you must have permissions to perform the s3-outposts:PutBucketTagging
action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging
has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional operation is currently in progress against this resource. Try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketTagging
:
Sets the supplied tag-set on an S3 Batch Operations job.
A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.
If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.
For deleting existing tags for your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.
A few things to consider about using tags:
Amazon S3 limits the maximum number of tags to 50 tags per job.
You can associate up to 50 tags with a job as long as they have unique tag keys.
A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.
The key and values are case sensitive.
For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.
To use this operation, you must have permission to perform the s3:PutJobTagging
action.
Related actions include:
", + "PutBucketPolicy": "This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.
Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy
permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this action.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketPolicy
:
This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.
Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this action, you must have permissions to perform the s3-outposts:PutBucketTagging
action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging
has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional action is currently in progress against this resource. Try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketTagging
:
Sets the supplied tag-set on an S3 Batch Operations job.
A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modifying that tag set, and using this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.
If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.
To delete existing tags from your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.
A few things to consider about using tags:
Amazon S3 limits the maximum number of tags to 50 tags per job.
You can associate up to 50 tags with a job as long as they have unique tag keys.
A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.
Keys and values are case sensitive.
For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.
To use this action, you must have permission to perform the s3:PutJobTagging
action.
Related actions include:
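A sketch of replacing a job's tag set with PutJobTagging, following the constraints listed above; the tag keys and values are placeholders.
func replaceJobTags(svc *s3control.S3Control, accountID, jobID string) error {
	// PutJobTagging replaces the job's whole tag set (max 50 tags, unique keys).
	// An empty set deletes the tags, though DeleteJobTagging does the same
	// without the Tier 1 PUT charge.
	_, err := svc.PutJobTagging(&s3control.PutJobTaggingInput{
		AccountId: aws.String(accountID),
		JobId:     aws.String(jobID),
		Tags: []*s3control.S3Tag{
			{Key: aws.String("Department"), Value: aws.String("Marketing")},
			{Key: aws.String("Stage"), Value: aws.String("Production")},
		},
	})
	return err
}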
", "PutPublicAccessBlock": "Creates or modifies the PublicAccessBlock
configuration for an AWS account. For more information, see Using Amazon S3 block public access.
Related actions include:
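Sketch of setting an account-level PublicAccessBlock configuration as described above; enabling all four flags is one common choice, not a requirement.
func blockPublicAccess(svc *s3control.S3Control, accountID string) error {
	_, err := svc.PutPublicAccessBlock(&s3control.PutPublicAccessBlockInput{
		AccountId: aws.String(accountID),
		PublicAccessBlockConfiguration: &s3control.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	return err
}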
", - "PutStorageLensConfiguration": "Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:PutStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", - "UpdateJobStatus": "Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Related actions include:
" + "PutStorageLensConfiguration": "Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:PutStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Puts or replaces tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Related actions include:
", + "UpdateJobStatus": "Updates the status for the specified job. Use this action to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Related actions include:
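Closing out the s3control doc updates, a sketch of confirming a Batch Operations job with UpdateJobStatus; this assumes the generated RequestedJobStatusReady/RequestedJobStatusCancelled enum constants, and the job ID and reason text are placeholders.
func confirmBatchJob(svc *s3control.S3Control, accountID, jobID string) error {
	_, err := svc.UpdateJobStatus(&s3control.UpdateJobStatusInput{
		AccountId:          aws.String(accountID),
		JobId:              aws.String(jobID),
		RequestedJobStatus: aws.String(s3control.RequestedJobStatusReady), // or RequestedJobStatusCancelled
		StatusUpdateReason: aws.String("confirmed after manifest review"),
	})
	return err
}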
" }, "shapes": { "AbortIncompleteMultipartUpload": { @@ -566,7 +566,7 @@ "IAMRoleArn": { "base": null, "refs": { - "CreateJobRequest$RoleArn": "The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's operation on every object in the manifest.
", + "CreateJobRequest$RoleArn": "The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's action on every object in the manifest.
", "JobDescriptor$RoleArn": "The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role assigned to run the tasks for this job.
" } }, @@ -738,9 +738,9 @@ } }, "JobOperation": { - "base": "The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.
", + "base": "The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service User Guide.
", "refs": { - "CreateJobRequest$Operation": "The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.
", + "CreateJobRequest$Operation": "The action that you want this job to perform on every object listed in the manifest. For more information about the available actions, see Operations in the Amazon Simple Storage Service User Guide.
", "JobDescriptor$Operation": "The operation that the specified job is configured to run on the objects listed in the manifest.
" } }, @@ -1051,12 +1051,12 @@ "refs": { "GetAccessPointPolicyResult$Policy": "The access point policy associated with the specified access point.
", "GetBucketPolicyResult$Policy": "The policy of the Outposts bucket.
", - "PutAccessPointPolicyRequest$Policy": "The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.
", + "PutAccessPointPolicyRequest$Policy": "The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service User Guide.
", "PutBucketPolicyRequest$Policy": "The bucket policy as a JSON document.
" } }, "PolicyStatus": { - "base": "Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.
", + "base": "Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service User Guide.
", "refs": { "GetAccessPointPolicyStatusResult$PolicyStatus": "Indicates the current policy status of the specified access point.
" } @@ -1356,9 +1356,9 @@ } }, "S3Retention": { - "base": "Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode
and RetainUntilDate
data types in your operation, you will remove the retention from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode
and RetainUntilDate
data types in your operation, you will remove the retention from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
" + "S3SetObjectRetentionOperation$Retention": "Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.
" } }, "S3SSEAlgorithm": { @@ -1374,13 +1374,13 @@ } }, "S3SetObjectLegalHoldOperation": { - "base": "Contains the configuration for an S3 Object Lock legal hold operation that an S3 Batch Operations job passes every object to the underlying PutObjectLegalHold
API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Contains the configuration for an S3 Object Lock legal hold operation that an S3 Batch Operations job passes every object to the underlying PutObjectLegalHold
API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes every object to the underlying PutObjectRetention
API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes every object to the underlying PutObjectRetention
API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.
", + "base": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.
", "refs": { "TransitionList$member": null } diff --git a/service/autoscaling/api.go b/service/autoscaling/api.go index 018b8cd28f1..470c1e2d181 100644 --- a/service/autoscaling/api.go +++ b/service/autoscaling/api.go @@ -6648,13 +6648,13 @@ type CreateAutoScalingGroupInput struct { MinSize *int64 `type:"integer" required:"true"` // An embedded object that specifies a mixed instances policy. The required - // parameters must be specified. If optional parameters are unspecified, their + // properties must be specified. If optional properties are unspecified, their // default values are used. // - // The policy includes parameters that not only define the distribution of On-Demand + // The policy includes properties that not only define the distribution of On-Demand // Instances and Spot Instances, the maximum price to pay for Spot Instances, // and how the Auto Scaling group allocates instance types to fulfill On-Demand - // and Spot capacities, but also the parameters that specify the instance configuration + // and Spot capacities, but also the properties that specify the instance configuration // information—the launch template and instance types. The policy can also // include a weight for each instance type and different launch templates for // individual instance types. For more information, see Auto Scaling groups @@ -11133,11 +11133,12 @@ type InstancesDistribution struct { // Indicates how to allocate instance types to fulfill On-Demand capacity. The // only valid value is prioritized, which is also the default value. This strategy - // uses the order of instance types in the overrides to define the launch priority - // of each instance type. The first instance type in the array is prioritized - // higher than the last. If all your On-Demand capacity cannot be fulfilled - // using your highest priority instance, then the Auto Scaling groups launches - // the remaining capacity using the second priority instance type, and so on. + // uses the order of instance types in the LaunchTemplateOverrides to define + // the launch priority of each instance type. The first instance type in the + // array is prioritized higher than the last. If all your On-Demand capacity + // cannot be fulfilled using your highest priority instance, then the Auto Scaling + // groups launches the remaining capacity using the second priority instance + // type, and so on. OnDemandAllocationStrategy *string `type:"string"` // The minimum amount of the Auto Scaling group's capacity that must be fulfilled @@ -11153,13 +11154,20 @@ type InstancesDistribution struct { // to 100 if not specified. If set to 100, only On-Demand Instances are provisioned. OnDemandPercentageAboveBaseCapacity *int64 `type:"integer"` - // Indicates how to allocate instances across Spot Instance pools. If the allocation - // strategy is capacity-optimized (recommended), the Auto Scaling group launches - // instances using Spot pools that are optimally chosen based on the available - // Spot capacity. If the allocation strategy is lowest-price, the Auto Scaling - // group launches instances using the Spot pools with the lowest price, and - // evenly allocates your instances across the number of Spot pools that you - // specify. Defaults to lowest-price if not specified. + // Indicates how to allocate instances across Spot Instance pools. 
+ // + // If the allocation strategy is lowest-price, the Auto Scaling group launches + // instances using the Spot pools with the lowest price, and evenly allocates + // your instances across the number of Spot pools that you specify. Defaults + // to lowest-price if not specified. + // + // If the allocation strategy is capacity-optimized (recommended), the Auto + // Scaling group launches instances using Spot pools that are optimally chosen + // based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized + // and set the order of instance types in the list of launch template overrides + // from highest to lowest priority (from first to last in the list). Amazon + // EC2 Auto Scaling honors the instance type priorities on a best-effort basis + // but optimizes for capacity first. SpotAllocationStrategy *string `type:"string"` // The number of Spot Instance pools across which to allocate your Spot Instances. @@ -11482,7 +11490,7 @@ func (s *LaunchConfiguration) SetUserData(v string) *LaunchConfiguration { // Describes a launch template and overrides. // -// You specify these parameters as part of a mixed instances policy. +// You specify these properties as part of a mixed instances policy. // // When you update the launch template or overrides, existing Amazon EC2 instances // continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches @@ -11494,7 +11502,7 @@ type LaunchTemplate struct { // The launch template to use. LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"` - // Any parameters that you specify override the same parameters in the launch + // Any properties that you specify override the same properties in the launch // template. If not provided, Amazon EC2 Auto Scaling uses the instance type // specified in the launch template when it launches an instance. Overrides []*LaunchTemplateOverrides `type:"list"` @@ -12233,13 +12241,13 @@ func (s *MetricGranularityType) SetGranularity(v string) *MetricGranularityType // // You can create a mixed instances policy for a new Auto Scaling group, or // you can create it for an existing group by updating the group to specify -// MixedInstancesPolicy as the top-level parameter instead of a launch configuration +// MixedInstancesPolicy as the top-level property instead of a launch configuration // or launch template. type MixedInstancesPolicy struct { _ struct{} `type:"structure"` // Specifies the instances distribution. If not provided, the value for each - // parameter in InstancesDistribution uses a default value. + // property in InstancesDistribution uses a default value. InstancesDistribution *InstancesDistribution `type:"structure"` // Specifies the launch template to use and optionally the instance types (overrides) @@ -14655,7 +14663,7 @@ type UpdateAutoScalingGroupInput struct { MinSize *int64 `type:"integer"` // An embedded object that specifies a mixed instances policy. When you make - // changes to an existing policy, all optional parameters are left unchanged + // changes to an existing policy, all optional properties are left unchanged // if not specified. For more information, see Auto Scaling groups with multiple // instance types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html) // in the Amazon EC2 Auto Scaling User Guide. 
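To make the release-note headline concrete, here is a hedged sketch (not the SDK's own example) of requesting the new capacity-optimized-prioritized Spot allocation strategy, where the order of the launch template overrides expresses instance type priority; the group name, subnets, template name, and instance types are placeholders.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	_, err := svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("example-asg"),
		MinSize:              aws.Int64(1),
		MaxSize:              aws.Int64(10),
		VPCZoneIdentifier:    aws.String("subnet-aaaa,subnet-bbbb"),
		MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{
			LaunchTemplate: &autoscaling.LaunchTemplate{
				LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{
					LaunchTemplateName: aws.String("example-template"),
					Version:            aws.String("$Latest"),
				},
				// With capacity-optimized-prioritized, list order is priority
				// order: the first entry is highest priority, honored on a
				// best-effort basis while optimizing for capacity first.
				Overrides: []*autoscaling.LaunchTemplateOverrides{
					{InstanceType: aws.String("c5.large")},
					{InstanceType: aws.String("c5a.large")},
					{InstanceType: aws.String("m5.large")},
				},
			},
			InstancesDistribution: &autoscaling.InstancesDistribution{
				OnDemandPercentageAboveBaseCapacity: aws.Int64(30),
				SpotAllocationStrategy:              aws.String("capacity-optimized-prioritized"),
			},
		},
	})
	if err != nil {
		panic(err)
	}
}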
diff --git a/service/emr/api.go b/service/emr/api.go index e95be405d2d..af920b876e8 100644 --- a/service/emr/api.go +++ b/service/emr/api.go @@ -8918,9 +8918,9 @@ type InstanceGroup struct { // of a CloudWatch metric. See PutAutoScalingPolicy. AutoScalingPolicy *AutoScalingPolicyDescription `type:"structure"` - // The bid price for each EC2 Spot Instance type as defined by InstanceType. - // Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice - // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + // If specified, indicates that the instance group uses Spot Instances. This + // is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice + // to set the amount equal to the On-Demand price, or specify an amount in USD. BidPrice *string `type:"string"` // @@ -9102,9 +9102,9 @@ type InstanceGroupConfig struct { // of a CloudWatch metric. See PutAutoScalingPolicy. AutoScalingPolicy *AutoScalingPolicy `type:"structure"` - // The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed - // in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, - // BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + // If specified, indicates that the instance group uses Spot Instances. This + // is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice + // to set the amount equal to the On-Demand price, or specify an amount in USD. BidPrice *string `type:"string"` // @@ -9241,9 +9241,9 @@ func (s *InstanceGroupConfig) SetName(v string) *InstanceGroupConfig { type InstanceGroupDetail struct { _ struct{} `type:"structure"` - // The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed - // in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, - // BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + // If specified, indicates that the instance group uses Spot Instances. This + // is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice + // to set the amount equal to the On-Demand price, or specify an amount in USD. BidPrice *string `type:"string"` // The date/time the instance group was created. @@ -11847,7 +11847,7 @@ type ModifyClusterInput struct { ClusterId *string `type:"string" required:"true"` // The number of steps that can be executed concurrently. You can specify a - // maximum of 256 steps. + // minimum of 1 step and a maximum of 256 steps. StepConcurrencyLevel *int64 `type:"integer"` } @@ -12305,6 +12305,59 @@ func (s *NotebookExecutionSummary) SetStatus(v string) *NotebookExecutionSummary return s } +// Describes the strategy for using unused Capacity Reservations for fulfilling +// On-Demand capacity. +type OnDemandCapacityReservationOptions struct { + _ struct{} `type:"structure"` + + // Indicates the instance's Capacity Reservation preferences. Possible preferences + // include: + // + // * open - The instance can run in any open Capacity Reservation that has + // matching attributes (instance type, platform, Availability Zone). + // + // * none - The instance avoids running in a Capacity Reservation even if + // one is available. The instance runs as an On-Demand Instance. + CapacityReservationPreference *string `type:"string" enum:"OnDemandCapacityReservationPreference"` + + // Indicates whether to use unused Capacity Reservations for fulfilling On-Demand + // capacity. 
+ // + // If you specify use-capacity-reservations-first, the fleet uses unused Capacity + // Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. + // If multiple instance pools have unused Capacity Reservations, the On-Demand + // allocation strategy (lowest-price) is applied. If the number of unused Capacity + // Reservations is less than the On-Demand target capacity, the remaining On-Demand + // target capacity is launched according to the On-Demand allocation strategy + // (lowest-price). + // + // If you do not specify a value, the fleet fulfils the On-Demand capacity according + // to the chosen On-Demand allocation strategy. + UsageStrategy *string `type:"string" enum:"OnDemandCapacityReservationUsageStrategy"` +} + +// String returns the string representation +func (s OnDemandCapacityReservationOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OnDemandCapacityReservationOptions) GoString() string { + return s.String() +} + +// SetCapacityReservationPreference sets the CapacityReservationPreference field's value. +func (s *OnDemandCapacityReservationOptions) SetCapacityReservationPreference(v string) *OnDemandCapacityReservationOptions { + s.CapacityReservationPreference = &v + return s +} + +// SetUsageStrategy sets the UsageStrategy field's value. +func (s *OnDemandCapacityReservationOptions) SetUsageStrategy(v string) *OnDemandCapacityReservationOptions { + s.UsageStrategy = &v + return s +} + // The launch specification for On-Demand Instances in the instance fleet, which // determines the allocation strategy. // @@ -12314,12 +12367,16 @@ func (s *NotebookExecutionSummary) SetStatus(v string) *NotebookExecutionSummary type OnDemandProvisioningSpecification struct { _ struct{} `type:"structure"` - // Specifies the strategy to use in launching On-Demand Instance fleets. Currently, + // Specifies the strategy to use in launching On-Demand instance fleets. Currently, // the only option is lowest-price (the default), which launches the lowest // price first. // // AllocationStrategy is a required field AllocationStrategy *string `type:"string" required:"true" enum:"OnDemandProvisioningAllocationStrategy"` + + // The launch specification for On-Demand instances in the instance fleet, which + // determines the allocation strategy. + CapacityReservationOptions *OnDemandCapacityReservationOptions `type:"structure"` } // String returns the string representation @@ -12351,6 +12408,12 @@ func (s *OnDemandProvisioningSpecification) SetAllocationStrategy(v string) *OnD return s } +// SetCapacityReservationOptions sets the CapacityReservationOptions field's value. +func (s *OnDemandProvisioningSpecification) SetCapacityReservationOptions(v *OnDemandCapacityReservationOptions) *OnDemandProvisioningSpecification { + s.CapacityReservationOptions = v + return s +} + // Placement group configuration for an Amazon EMR cluster. The configuration // specifies the placement strategy that can be applied to instance roles during // cluster creation. 
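Before the enum hunks below, a sketch of wiring the new OnDemandCapacityReservationOptions into an EMR instance fleet; the resulting config would be passed via RunJobFlowInput.Instances.InstanceFleets. The fleet name, instance types, and target capacity are placeholder assumptions.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// newCoreFleet builds an On-Demand instance fleet that draws down unused
// Capacity Reservations before launching new On-Demand capacity.
func newCoreFleet() *emr.InstanceFleetConfig {
	return &emr.InstanceFleetConfig{
		Name:                   aws.String("core-fleet"),
		InstanceFleetType:      aws.String(emr.InstanceFleetTypeCore),
		TargetOnDemandCapacity: aws.Int64(4),
		InstanceTypeConfigs: []*emr.InstanceTypeConfig{
			{InstanceType: aws.String("m5.xlarge")},
			{InstanceType: aws.String("m5a.xlarge")},
		},
		LaunchSpecifications: &emr.InstanceFleetProvisioningSpecifications{
			OnDemandSpecification: &emr.OnDemandProvisioningSpecification{
				AllocationStrategy: aws.String(emr.OnDemandProvisioningAllocationStrategyLowestPrice),
				CapacityReservationOptions: &emr.OnDemandCapacityReservationOptions{
					// Use open Capacity Reservations first; any shortfall is
					// launched per the lowest-price allocation strategy.
					UsageStrategy:                 aws.String(emr.OnDemandCapacityReservationUsageStrategyUseCapacityReservationsFirst),
					CapacityReservationPreference: aws.String(emr.OnDemandCapacityReservationPreferenceOpen),
				},
			},
		},
	}
}

func main() {
	fmt.Println(newCoreFleet())
}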
@@ -16146,6 +16209,34 @@ func NotebookExecutionStatus_Values() []string { } } +const ( + // OnDemandCapacityReservationPreferenceOpen is a OnDemandCapacityReservationPreference enum value + OnDemandCapacityReservationPreferenceOpen = "open" + + // OnDemandCapacityReservationPreferenceNone is a OnDemandCapacityReservationPreference enum value + OnDemandCapacityReservationPreferenceNone = "none" +) + +// OnDemandCapacityReservationPreference_Values returns all elements of the OnDemandCapacityReservationPreference enum +func OnDemandCapacityReservationPreference_Values() []string { + return []string{ + OnDemandCapacityReservationPreferenceOpen, + OnDemandCapacityReservationPreferenceNone, + } +} + +const ( + // OnDemandCapacityReservationUsageStrategyUseCapacityReservationsFirst is a OnDemandCapacityReservationUsageStrategy enum value + OnDemandCapacityReservationUsageStrategyUseCapacityReservationsFirst = "use-capacity-reservations-first" +) + +// OnDemandCapacityReservationUsageStrategy_Values returns all elements of the OnDemandCapacityReservationUsageStrategy enum +func OnDemandCapacityReservationUsageStrategy_Values() []string { + return []string{ + OnDemandCapacityReservationUsageStrategyUseCapacityReservationsFirst, + } +} + const ( // OnDemandProvisioningAllocationStrategyLowestPrice is a OnDemandProvisioningAllocationStrategy enum value OnDemandProvisioningAllocationStrategyLowestPrice = "lowest-price" diff --git a/service/kinesisvideoarchivedmedia/api.go b/service/kinesisvideoarchivedmedia/api.go index aab8aabbfee..398b35baebb 100644 --- a/service/kinesisvideoarchivedmedia/api.go +++ b/service/kinesisvideoarchivedmedia/api.go @@ -117,7 +117,8 @@ func (c *KinesisVideoArchivedMedia) GetClipRequest(input *GetClipInput) (req *re // // * ClientLimitExceededException // Kinesis Video Streams has throttled the request because you have exceeded -// the limit of allowed client calls. Try making the call later. +// a limit. Try making the call later. For information about limits, see Kinesis +// Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // // * NotAuthorizedException // Status Code: 403, The caller is not authorized to perform an operation on @@ -247,9 +248,9 @@ func (c *KinesisVideoArchivedMedia) GetDASHStreamingSessionURLRequest(input *Get // an authenticated URL (that includes an encrypted session token) for the session's // MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH). // -// Don't share or store this token where an unauthorized entity could access -// it. The token provides access to the content of the stream. Safeguard the -// token with the same measures that you would use with your AWS credentials. +// Don't share or store this token where an unauthorized entity can access it. +// The token provides access to the content of the stream. Safeguard the token +// with the same measures that you use with your AWS credentials. // // The media that is made available through the manifest consists only of the // requested stream, time range, and format. No other media data (such as frames @@ -290,20 +291,8 @@ func (c *KinesisVideoArchivedMedia) GetDASHStreamingSessionURLRequest(input *Get // this action is billable. See Pricing (https://aws.amazon.com/kinesis/video-streams/pricing/) // for details. // -// The following restrictions apply to MPEG-DASH sessions: -// -// * A streaming session URL should not be shared between players. The service -// might throttle a session if multiple media players are sharing it. 
For -// connection limits, see Kinesis Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). -// -// * A Kinesis video stream can have a maximum of ten active MPEG-DASH streaming -// sessions. If a new session is created when the maximum number of sessions -// is already active, the oldest (earliest created) session is closed. The -// number of active GetMedia connections on a Kinesis video stream does not -// count against this limit, and the number of active MPEG-DASH sessions -// does not count against the active GetMedia connection limit. The maximum -// limits for active HLS and MPEG-DASH streaming sessions are independent -// of each other. +// For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams +// Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // // You can monitor the amount of data that the media player consumes by monitoring // the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information @@ -359,7 +348,8 @@ func (c *KinesisVideoArchivedMedia) GetDASHStreamingSessionURLRequest(input *Get // // * ClientLimitExceededException // Kinesis Video Streams has throttled the request because you have exceeded -// the limit of allowed client calls. Try making the call later. +// a limit. Try making the call later. For information about limits, see Kinesis +// Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // // * NotAuthorizedException // Status Code: 403, The caller is not authorized to perform an operation on @@ -550,20 +540,9 @@ func (c *KinesisVideoArchivedMedia) GetHLSStreamingSessionURLRequest(input *GetH // to retrieve stream media. Data retrieved with this action is billable. // For more information, see Kinesis Video Streams pricing (https://aws.amazon.com/kinesis/video-streams/pricing/). // -// The following restrictions apply to HLS sessions: -// -// * A streaming session URL should not be shared between players. The service -// might throttle a session if multiple media players are sharing it. For -// connection limits, see Kinesis Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). -// -// * A Kinesis video stream can have a maximum of ten active HLS streaming -// sessions. If a new session is created when the maximum number of sessions -// is already active, the oldest (earliest created) session is closed. The -// number of active GetMedia connections on a Kinesis video stream does not -// count against this limit, and the number of active HLS sessions does not -// count against the active GetMedia connection limit. The maximum limits -// for active HLS and MPEG-DASH streaming sessions are independent of each -// other. +// A streaming session URL must not be shared between players. The service might +// throttle a session if multiple media players are sharing it. For connection +// limits, see Kinesis Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // // You can monitor the amount of data that the media player consumes by monitoring // the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information @@ -619,7 +598,8 @@ func (c *KinesisVideoArchivedMedia) GetHLSStreamingSessionURLRequest(input *GetH // // * ClientLimitExceededException // Kinesis Video Streams has throttled the request because you have exceeded -// the limit of allowed client calls. Try making the call later. +// a limit. Try making the call later. 
For information about limits, see Kinesis +// Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // // * NotAuthorizedException // Status Code: 403, The caller is not authorized to perform an operation on @@ -715,14 +695,7 @@ func (c *KinesisVideoArchivedMedia) GetMediaForFragmentListRequest(input *GetMed // the GetMediaForFragmentList requests to this endpoint using the --endpoint-url // parameter (https://docs.aws.amazon.com/cli/latest/reference/). // -// The following limits apply when using the GetMediaForFragmentList API: -// -// * A client can call GetMediaForFragmentList up to five times per second -// per stream. -// -// * Kinesis Video Streams sends media data at a rate of up to 25 megabytes -// per second (or 200 megabits per second) during a GetMediaForFragmentList -// session. +// For limits, see Kinesis Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // // If an error is thrown after invoking a Kinesis Video Streams archived media // API, in addition to the HTTP status code and the response body, it includes @@ -767,7 +740,8 @@ func (c *KinesisVideoArchivedMedia) GetMediaForFragmentListRequest(input *GetMed // // * ClientLimitExceededException // Kinesis Video Streams has throttled the request because you have exceeded -// the limit of allowed client calls. Try making the call later. +// a limit. Try making the call later. For information about limits, see Kinesis +// Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // // * NotAuthorizedException // Status Code: 403, The caller is not authorized to perform an operation on @@ -900,7 +874,8 @@ func (c *KinesisVideoArchivedMedia) ListFragmentsRequest(input *ListFragmentsInp // // * ClientLimitExceededException // Kinesis Video Streams has throttled the request because you have exceeded -// the limit of allowed client calls. Try making the call later. +// a limit. Try making the call later. For information about limits, see Kinesis +// Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // // * NotAuthorizedException // Status Code: 403, The caller is not authorized to perform an operation on @@ -981,7 +956,8 @@ func (c *KinesisVideoArchivedMedia) ListFragmentsPagesWithContext(ctx aws.Contex } // Kinesis Video Streams has throttled the request because you have exceeded -// the limit of allowed client calls. Try making the call later. +// a limit. Try making the call later. For information about limits, see Kinesis +// Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). type ClientLimitExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -1103,16 +1079,12 @@ func (s *ClipFragmentSelector) SetTimestampRange(v *ClipTimestampRange) *ClipFra } // The range of timestamps for which to return fragments. -// -// The values in the ClipTimestampRange are inclusive. Fragments that begin -// before the start time but continue past it, or fragments that begin before -// the end time but continue past it, are included in the session. type ClipTimestampRange struct { _ struct{} `type:"structure"` // The end of the timestamp range for the requested media. // - // This value must be within 3 hours of the specified StartTimestamp, and it + // This value must be within 24 hours of the specified StartTimestamp, and it // must be later than the StartTimestamp value. 
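The new 24-hour bound on ClipTimestampRange is easy to respect by deriving the end of the range from the start. A sketch using the GetClip types shown above; the client (already pointed at the stream's GET_CLIP data endpoint, as in the earlier sketch), stream name, and output path are assumptions:

```go
package example

import (
	"io"
	"os"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

// saveClip downloads an MP4 clip for a bounded time range. The end timestamp
// must be later than the start and within 24 hours of it.
func saveClip(kvam *kinesisvideoarchivedmedia.KinesisVideoArchivedMedia, stream, path string, start, end time.Time) error {
	out, err := kvam.GetClip(&kinesisvideoarchivedmedia.GetClipInput{
		StreamName: aws.String(stream),
		ClipFragmentSelector: &kinesisvideoarchivedmedia.ClipFragmentSelector{
			FragmentSelectorType: aws.String("SERVER_TIMESTAMP"),
			TimestampRange: &kinesisvideoarchivedmedia.ClipTimestampRange{
				StartTimestamp: aws.Time(start),
				EndTimestamp:   aws.Time(end), // within 24 hours of StartTimestamp
			},
		},
	})
	if err != nil {
		return err
	}
	defer out.Payload.Close()

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, out.Payload)
	return err
}
```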
If FragmentSelectorType for
// the request is SERVER_TIMESTAMP, this value must be in the past.
//
@@ -1125,9 +1097,10 @@ type ClipTimestampRange struct {
 
 	// The starting timestamp in the range of timestamps for which to return fragments.
 	//
-	// This value is inclusive. Fragments that start before the StartTimestamp and
-	// continue past it are included in the session. If FragmentSelectorType is
-	// SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
+	// Only fragments that start exactly at or after StartTimestamp are included
+	// in the session. Fragments that start before StartTimestamp and continue past
+	// it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP,
+	// the StartTimestamp must be later than the stream head.
 	//
 	// StartTimestamp is a required field
 	StartTimestamp *time.Time `type:"timestamp" required:"true"`
@@ -1236,14 +1209,14 @@ func (s *DASHFragmentSelector) SetTimestampRange(v *DASHTimestampRange) *DASHFra
 //
 // This value should not be present if PlaybackType is LIVE.
 //
-// The values in the DASHimestampRange are inclusive. Fragments that begin before
-// the start time but continue past it, or fragments that begin before the end
-// time but continue past it, are included in the session.
+// The values in DASHTimestampRange are inclusive. Fragments that start exactly
+// at or after the start time are included in the session. Fragments that start
+// before the start time and continue past it are not included in the session.
 type DASHTimestampRange struct {
 	_ struct{} `type:"structure"`
 
 	// The end of the timestamp range for the requested media. This value must be
-	// within 3 hours of the specified StartTimestamp, and it must be later than
+	// within 24 hours of the specified StartTimestamp, and it must be later than
 	// the StartTimestamp value.
 	//
 	// If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must
@@ -1263,9 +1236,10 @@ type DASHTimestampRange struct {
 	// If the DASHTimestampRange value is specified, the StartTimestamp value is
 	// required.
 	//
-	// This value is inclusive. Fragments that start before the StartTimestamp and
-	// continue past it are included in the session. If FragmentSelectorType is
-	// SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.
+	// Only fragments that start exactly at or after StartTimestamp are included
+	// in the session. Fragments that start before StartTimestamp and continue past
+	// it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP,
+	// the StartTimestamp must be later than the stream head.
 	StartTimestamp *time.Time `type:"timestamp"`
 }
 
@@ -1632,9 +1606,9 @@ type GetDASHStreamingSessionURLInput struct {
 	//
 	//    * ON_DEMAND : For sessions of this type, the MPEG-DASH manifest contains
 	//    all the fragments for the session, up to the number that is specified
-	//    in MaxMediaPlaylistFragmentResults. The manifest must be retrieved only
-	//    once for each session. When this type of session is played in a media
-	//    player, the user interface typically displays a scrubber control for choosing
+	//    in MaxManifestFragmentResults. The manifest must be retrieved only once
+	//    for each session. When this type of session is played in a media player,
+	//    the user interface typically displays a scrubber control for choosing
	//    the position in the playback window to display.
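The revised StartTimestamp semantics (only fragments starting at or after the timestamp are returned) and the MaxManifestFragmentResults field named above come together in an ON_DEMAND request. A sketch; the client and timestamps are assumptions:

```go
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

// onDemandDASHURL requests an MPEG-DASH manifest URL for a bounded, archived
// time range. kvam must already point at the stream's data endpoint.
func onDemandDASHURL(kvam *kinesisvideoarchivedmedia.KinesisVideoArchivedMedia, stream string, start, end time.Time) (string, error) {
	out, err := kvam.GetDASHStreamingSessionURL(&kinesisvideoarchivedmedia.GetDASHStreamingSessionURLInput{
		StreamName:   aws.String(stream),
		PlaybackMode: aws.String("ON_DEMAND"),
		DASHFragmentSelector: &kinesisvideoarchivedmedia.DASHFragmentSelector{
			FragmentSelectorType: aws.String("PRODUCER_TIMESTAMP"),
			TimestampRange: &kinesisvideoarchivedmedia.DASHTimestampRange{
				// Only fragments starting at or after StartTimestamp are
				// included; EndTimestamp must be within 24 hours of it.
				StartTimestamp: aws.Time(start),
				EndTimestamp:   aws.Time(end),
			},
		},
		MaxManifestFragmentResults: aws.Int64(1000),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.DASHStreamingSessionURL), nil
}
```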
// // In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and @@ -1859,9 +1833,9 @@ type GetHLSStreamingSessionURLInput struct { // The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 // if PlaybackMode is ON_DEMAND. // - // The maximum value of 1,000 fragments corresponds to more than 16 minutes - // of video on streams with 1-second fragments, and more than 2 1/2 hours of - // video on streams with 10-second fragments. + // The maximum value of 5,000 fragments corresponds to more than 80 minutes + // of video on streams with 1-second fragments, and more than 13 hours of video + // on streams with 10-second fragments. MaxMediaPlaylistFragmentResults *int64 `min:"1" type:"long"` // Whether to retrieve live, live replay, or archived, on-demand data. @@ -1903,7 +1877,7 @@ type GetHLSStreamingSessionURLInput struct { // // In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and // if there are multiple fragments with the same start timestamp, the fragment - // that has the larger fragment number (that is, the newer fragment) is included + // that has the largest fragment number (that is, the newest fragment) is included // in the HLS media playlist. The other fragments are not included. Fragments // that have different timestamps but have overlapping durations are still included // in the HLS media playlist. This can lead to unexpected behavior in the media @@ -2218,15 +2192,11 @@ func (s *HLSFragmentSelector) SetTimestampRange(v *HLSTimestampRange) *HLSFragme // The start and end of the timestamp range for the requested media. // // This value should not be present if PlaybackType is LIVE. -// -// The values in the HLSTimestampRange are inclusive. Fragments that begin before -// the start time but continue past it, or fragments that begin before the end -// time but continue past it, are included in the session. type HLSTimestampRange struct { _ struct{} `type:"structure"` // The end of the timestamp range for the requested media. This value must be - // within 3 hours of the specified StartTimestamp, and it must be later than + // within 24 hours of the specified StartTimestamp, and it must be later than // the StartTimestamp value. // // If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must @@ -2246,9 +2216,10 @@ type HLSTimestampRange struct { // If the HLSTimestampRange value is specified, the StartTimestamp value is // required. // - // This value is inclusive. Fragments that start before the StartTimestamp and - // continue past it are included in the session. If FragmentSelectorType is - // SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head. + // Only fragments that start exactly at or after StartTimestamp are included + // in the session. Fragments that start before StartTimestamp and continue past + // it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, + // the StartTimestamp must be later than the stream head. StartTimestamp *time.Time `type:"timestamp"` } diff --git a/service/kinesisvideoarchivedmedia/errors.go b/service/kinesisvideoarchivedmedia/errors.go index 90c1ff89156..ab84634ac39 100644 --- a/service/kinesisvideoarchivedmedia/errors.go +++ b/service/kinesisvideoarchivedmedia/errors.go @@ -12,7 +12,8 @@ const ( // "ClientLimitExceededException". // // Kinesis Video Streams has throttled the request because you have exceeded - // the limit of allowed client calls. Try making the call later. + // a limit. Try making the call later. 
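Since ClientLimitExceededException now explicitly signals throttling against a documented limit, callers can reasonably treat it as retryable. A sketch; the backoff policy itself is an assumption, not SDK behavior:

```go
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

// callWithBackoff retries fn when Kinesis Video Streams throttles the caller,
// as signalled by ClientLimitExceededException. (Illustrative helper.)
func callWithBackoff(fn func() error) error {
	var err error
	for attempt := 0; attempt < 5; attempt++ {
		if err = fn(); err == nil {
			return nil
		}
		aerr, ok := err.(awserr.Error)
		if !ok || aerr.Code() != kinesisvideoarchivedmedia.ErrCodeClientLimitExceededException {
			return err // not a throttle; don't retry
		}
		// Exponential backoff: 500ms, 1s, 2s, ...
		time.Sleep(time.Duration(1<<uint(attempt)) * 500 * time.Millisecond)
	}
	return err
}
```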
For information about limits, see Kinesis + // Video Streams Limits (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). ErrCodeClientLimitExceededException = "ClientLimitExceededException" // ErrCodeInvalidArgumentException for service response error code diff --git a/service/lambda/api.go b/service/lambda/api.go index 9b2c8b5653c..44dc043ab69 100644 --- a/service/lambda/api.go +++ b/service/lambda/api.go @@ -3710,8 +3710,12 @@ func (c *Lambda) ListFunctionsRequest(input *ListFunctionsInput) (req *request.R // of each. Lambda returns up to 50 functions per call. // // Set FunctionVersion to ALL to include all published versions of each function -// in addition to the unpublished version. To get more information about a function -// or version, use GetFunction. +// in addition to the unpublished version. +// +// The ListFunctions action returns a subset of the FunctionConfiguration fields. +// To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, +// LastUpdateStatusReason, LastUpdateStatusReasonCode) for a function or version, +// use GetFunction. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13030,7 +13034,9 @@ type ListFunctionsInput struct { // you must set FunctionVersion to ALL. MasterRegion *string `location:"querystring" locationName:"MasterRegion" type:"string"` - // The maximum number of functions to return. + // The maximum number of functions to return in the response. Note that ListFunctions + // returns a maximum of 50 items in each response, even if you set the number + // higher. MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` } diff --git a/service/s3/api.go b/service/s3/api.go index cc1f3dbf52e..8182416c3e9 100644 --- a/service/s3/api.go +++ b/service/s3/api.go @@ -67,7 +67,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// This operation aborts a multipart upload. After a multipart upload is aborted, +// This action aborts a multipart upload. After a multipart upload is aborted, // no additional parts can be uploaded using that upload ID. The storage consumed // by any previously uploaded parts will be freed. However, if any part uploads // are currently in progress, those part uploads might or might not succeed. @@ -76,10 +76,10 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // // To verify that all parts have been removed, so you don't get charged for // the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// operation and ensure that the parts list is empty. +// action and ensure that the parts list is empty. // -// For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// For information about permissions required to use the multipart upload, see +// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
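The ListFunctions notes above (at most 50 functions per response regardless of MaxItems, and the extra FunctionConfiguration fields only via GetFunction) argue for the SDK paginator rather than a large MaxItems. A minimal sketch, assuming default credentials and region:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := lambda.New(sess)

	// Each page holds at most 50 functions, so paginate instead of
	// raising MaxItems.
	input := &lambda.ListFunctionsInput{
		FunctionVersion: aws.String("ALL"), // include published versions too
	}
	err := svc.ListFunctionsPages(input, func(page *lambda.ListFunctionsOutput, lastPage bool) bool {
		for _, fn := range page.Functions {
			fmt.Println(aws.StringValue(fn.FunctionName), aws.StringValue(fn.Version))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}
```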
// // The following operations are related to AbortMultipartUpload: // @@ -175,10 +175,10 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // You first initiate the multipart upload and then upload all parts using the // UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // operation. After successfully uploading all relevant parts of an upload, -// you call this operation to complete the upload. Upon receiving this request, +// you call this action to complete the upload. Upon receiving this request, // Amazon S3 concatenates all the parts in ascending order by part number to // create a new object. In the Complete Multipart Upload request, you must provide -// the parts list. You must ensure that the parts list is complete. This operation +// the parts list. You must ensure that the parts list is complete. This action // concatenates the parts that you provide in the list. For each part in the // list, you must provide the part number and the ETag value, returned after // that part was uploaded. @@ -199,7 +199,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // CompleteMultipartUpload has the following special errors: // @@ -306,10 +306,10 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // Creates a copy of an object that is already stored in Amazon S3. // // You can store individual objects of up to 5 TB in Amazon S3. You create a -// copy of your object up to 5 GB in size in a single atomic operation using -// this API. However, to copy an object greater than 5 GB, you must use the -// multipart upload Upload Part - Copy API. For more information, see Copy Object -// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// copy of your object up to 5 GB in size in a single atomic action using this +// API. However, to copy an object greater than 5 GB, you must use the multipart +// upload Upload Part - Copy API. For more information, see Copy Object Using +// the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). // // All copy requests must be authenticated. Additionally, you must have read // access to the source object and write access to the destination bucket. For @@ -319,7 +319,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // A copy request might return an error when Amazon S3 receives the copy request // or while Amazon S3 is copying the files. If the error occurs before the copy -// operation starts, you receive a standard Amazon S3 error. If the error occurs +// action starts, you receive a standard Amazon S3 error. If the error occurs // during the copy operation, the error response is embedded in the 200 OK response. // This means that a 200 OK response can contain either a success or an error. 
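The AbortMultipartUpload guidance above, abort and then confirm via ListParts that no billable parts remain, can be wrapped up as follows. A sketch; the helper name and the NoSuchUpload handling are illustrative:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// abortAndVerify aborts a multipart upload and then lists its parts to confirm
// none remain billable. In-flight part uploads may still land after the abort,
// so callers may need to retry.
func abortAndVerify(svc *s3.S3, bucket, key, uploadID string) (bool, error) {
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		return false, err
	}

	parts, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchUpload {
			return true, nil // upload fully removed
		}
		return false, err
	}
	return len(parts.Parts) == 0, nil
}
```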
// Design your application to parse the contents of the response and handle @@ -334,7 +334,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // The copy request charge is based on the storage class and Region that you // specify for the destination object. For pricing information, see Amazon S3 -// pricing (https://aws.amazon.com/s3/pricing/). +// pricing (http://aws.amazon.com/s3/pricing/). // // Amazon S3 transfer acceleration does not support cross-Region copies. If // you request a cross-Region copy using a transfer acceleration endpoint, you @@ -404,7 +404,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the // object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -418,7 +418,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Storage Class Options // -// You can use the CopyObject operation to change the storage class of an object +// You can use the CopyObject action to change the storage class of an object // that is already stored in Amazon S3 using the StorageClass parameter. For // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 Service Developer Guide. @@ -459,8 +459,8 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Returned Error Codes: // * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" -// The source object of the COPY operation is not in the active tier and is -// only stored in Amazon S3 Glacier. +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { @@ -678,10 +678,10 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // CreateMultipartUpload API operation for Amazon Simple Storage Service. // -// This operation initiates a multipart upload and returns an upload ID. This -// upload ID is used to associate all of the parts in the specific multipart -// upload. You specify this upload ID in each of your subsequent upload part -// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. +// You specify this upload ID in each of your subsequent upload part requests +// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). // You also include this upload ID in the final request to either complete or // abort the multipart upload request. // @@ -691,12 +691,12 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // If you have configured a lifecycle rule to abort incomplete multipart uploads, // the upload must complete within the number of days specified in the bucket // lifecycle configuration. 
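The storage-class note above (CopyObject can rewrite an object's storage class in place, for objects up to 5 GB) looks like this in practice. Bucket and key are placeholders; a key containing special characters would need URL encoding in CopySource:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// toStandardIA re-copies an object over itself to move it to STANDARD_IA via
// the StorageClass parameter. Objects larger than 5 GB need the multipart
// UploadPartCopy path instead.
func toStandardIA(svc *s3.S3, bucket, key string) error {
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		CopySource:   aws.String(bucket + "/" + key), // must be URL-encoded if needed
		StorageClass: aws.String(s3.StorageClassStandardIa),
	})
	return err
}
```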
Otherwise, the incomplete multipart upload becomes -// eligible for an abort operation and Amazon S3 aborts the multipart upload. -// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// eligible for an abort action and Amazon S3 aborts the multipart upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). // // For information about the permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // For request signing, multipart upload is just a series of regular requests. // You initiate a multipart upload, send one or more requests to upload parts, @@ -716,7 +716,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // and decrypts it when you access it. You can provide your own encryption key, // or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or // Amazon S3-managed encryption keys. If you choose to provide your own encryption -// key, the request headers you provide in UploadPart (AmazonS3/latest/API/API_UploadPart.html) +// key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload // by using CreateMultipartUpload. @@ -1083,7 +1083,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // permission to others. // // For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources: // @@ -1164,17 +1164,17 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE operation removes default encryption from +// This implementation of the DELETE action removes default encryption from // the bucket. For information about the Amazon S3 default encryption feature, // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. 
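The bucket lifecycle rule that makes incomplete uploads "eligible for an abort action", as described above, is configured per bucket. A sketch with an illustrative rule ID and an empty prefix covering the whole bucket:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// abortStaleUploads installs a lifecycle rule that aborts incomplete multipart
// uploads after seven days, so abandoned parts stop accruing storage charges.
func abortStaleUploads(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("abort-stale-multipart-uploads"),
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")}, // whole bucket
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	return err
}
```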
// // Related Resources // @@ -1725,9 +1725,9 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE operation uses the policy subresource to -// delete the policy of a specified bucket. If you are using an identity other -// than the root user of the AWS account that owns the bucket, the calling identity +// This implementation of the DELETE action uses the policy subresource to delete +// the policy of a specified bucket. If you are using an identity other than +// the root user of the AWS account that owns the bucket, the calling identity // must have the DeleteBucketPolicy permissions on the specified bucket and // belong to the bucket owner's account to use this operation. // @@ -2000,15 +2000,15 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // -// This operation removes the website configuration for a bucket. Amazon S3 -// returns a 200 OK response upon successfully deleting a website configuration -// on the specified bucket. You will get a 200 OK response if the website configuration +// This action removes the website configuration for a bucket. Amazon S3 returns +// a 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration // you are trying to delete does not exist on the bucket. Amazon S3 returns // a 404 response if the bucket specified in the request does not exist. // -// This DELETE operation requires the S3:DeleteBucketWebsite permission. By -// default, only the bucket owner can delete the website configuration attached -// to a bucket. However, bucket owners can grant other users permission to delete +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a +// bucket. However, bucket owners can grant other users permission to delete // the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite // permission. // @@ -2110,14 +2110,14 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). // To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). // -// You can delete objects by explicitly calling the DELETE Object API or configure -// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// You can delete objects by explicitly calling DELETE Object or configure its +// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) // to enable Amazon S3 to remove them for you. If you want to block users or // accounts from removing or deleting objects from your bucket, you must deny // them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration // actions. 
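The versioned-delete behavior described above distinguishes inserting a delete marker from permanently removing a specific version. A sketch of the latter; if MFA delete is enabled on the bucket, the request would additionally need the MFA header:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// deleteVersion permanently removes one version of an object from a
// versioning-enabled bucket. Omitting VersionId would instead insert a
// delete marker.
func deleteVersion(svc *s3.S3, bucket, key, versionID string) error {
	_, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
	})
	return err
}
```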
// -// The following operation is related to DeleteObject: +// The following action is related to DeleteObject: // // * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // @@ -2285,27 +2285,27 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // DeleteObjects API operation for Amazon Simple Storage Service. // -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. If you know the object keys that you want to delete, -// then this operation provides a suitable alternative to sending individual -// delete requests, reducing per-request overhead. +// This action enables you to delete multiple objects from a bucket using a +// single HTTP request. If you know the object keys that you want to delete, +// then this action provides a suitable alternative to sending individual delete +// requests, reducing per-request overhead. // // The request contains a list of up to 1000 keys that you want to delete. In // the XML, you provide the object key names, and optionally, version IDs if // you want to delete a specific version of the object from a versioning-enabled -// bucket. For each key, Amazon S3 performs a delete operation and returns the +// bucket. For each key, Amazon S3 performs a delete action and returns the // result of that delete, success, or failure, in the response. Note that if // the object specified in the request is not found, Amazon S3 returns the result // as deleted. // -// The operation supports two modes for the response: verbose and quiet. By -// default, the operation uses verbose mode in which the response includes the -// result of deletion of each key in your request. In quiet mode the response -// includes only keys where the delete operation encountered an error. For a -// successful deletion, the operation does not return any information about -// the delete in the response body. +// The action supports two modes for the response: verbose and quiet. By default, +// the action uses verbose mode in which the response includes the result of +// deletion of each key in your request. In quiet mode the response includes +// only keys where the delete action encountered an error. For a successful +// deletion, the action does not return any information about the delete in +// the response body. // -// When performing this operation on an MFA Delete enabled bucket, that attempts +// When performing this action on an MFA Delete enabled bucket, that attempts // to delete any versioned objects, you must include an MFA token. If you do // not provide one, the entire request will fail, even if there are non-versioned // objects you are trying to delete. If you provide an invalid token, whether @@ -2489,8 +2489,8 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation uses the accelerate subresource -// to return the Transfer Acceleration state of a bucket, which is either Enabled +// This implementation of the GET action uses the accelerate subresource to +// return the Transfer Acceleration state of a bucket, which is either Enabled // or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that // enables you to perform faster data transfers to and from Amazon S3. 
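The quiet-mode contract described in the DeleteObjects section above (the response reports only keys whose delete action failed) can be used like this; bucket and keys are placeholders:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// batchDelete removes up to 1,000 keys in one request. With Quiet set, the
// response lists only the keys whose delete action failed.
func batchDelete(svc *s3.S3, bucket string, keys []string) error {
	objects := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		objects = append(objects, &s3.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &s3.Delete{Objects: objects, Quiet: aws.Bool(true)},
	})
	if err != nil {
		return err
	}
	for _, e := range out.Errors {
		fmt.Printf("failed to delete %s: %s\n", aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
	return nil
}
```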
// @@ -2499,7 +2499,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // can grant this permission to others. For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // You set the Transfer Acceleration state of an existing bucket to Enabled // or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) @@ -2511,7 +2511,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // // For more information about transfer acceleration, see Transfer Acceleration // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources // @@ -2589,7 +2589,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // GetBucketAcl API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation uses the acl subresource to return +// This implementation of the GET action uses the acl subresource to return // the access control list (ACL) of a bucket. To use GET to return the ACL of // the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission // is granted to the anonymous user, you can return the ACL of the bucket without @@ -2671,7 +2671,7 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation returns an analytics configuration +// This implementation of the GET action returns an analytics configuration // (identified by the analytics configuration ID) from the bucket. // // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration @@ -2679,11 +2679,11 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // can grant this permission to others. For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // For information about Amazon S3 analytics feature, see Amazon S3 Analytics // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources // @@ -3250,8 +3250,8 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // Accordingly, this section describes the latest API. 
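Reading the filter-aware lifecycle configuration discussed above is a single call; a minimal sketch:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printLifecycleRules reads the bucket's lifecycle configuration using the
// latest, filter-aware API.
func printLifecycleRules(svc *s3.S3, bucket string) error {
	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, r := range out.Rules {
		fmt.Println(aws.StringValue(r.ID), aws.StringValue(r.Status))
	}
	return nil
}
```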
The response describes // the new filter element that you can use to specify a filter to select a subset // of objects to which the rule applies. If you are using a previous version -// of the lifecycle configuration, it still works. For the earlier API description, -// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// of the lifecycle configuration, it still works. For the earlier action, see +// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). // // Returns the lifecycle configuration information set on the bucket. For information // about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). @@ -3692,8 +3692,8 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // Returns the notification configuration of a bucket. // -// If notifications are not enabled on the bucket, the operation returns an -// empty NotificationConfiguration element. +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. // // By default, you must be the bucket owner to read the notification configuration // of a bucket. However, the bucket owner can use a bucket policy to grant permission @@ -3704,7 +3704,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). // For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // -// The following operation is related to GetBucketNotification: +// The following action is related to GetBucketNotification: // // * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) // @@ -3882,7 +3882,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // For more information about bucket policies, see Using Bucket Policies and // User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // -// The following operation is related to GetBucketPolicy: +// The following action is related to GetBucketPolicy: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -4055,11 +4055,11 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // can return a wrong result. // // For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // -// This operation requires permissions for the s3:GetReplicationConfiguration -// action. For more information about permissions, see Using Bucket Policies -// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// This action requires permissions for the s3:GetReplicationConfiguration action. +// For more information about permissions, see Using Bucket Policies and User +// Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // // If you include the Filter element in a replication configuration, you must // also include the DeleteMarkerReplication and Priority elements. 
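A quick, read-only way to check that each replication rule carries the Priority and DeleteMarkerReplication elements that, per the text above, must accompany a Filter:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// auditReplication lists each replication rule with the fields that must
// accompany a Filter. Note the caveat above: a very recent PutBucketReplication
// may not be visible yet.
func auditReplication(svc *s3.S3, bucket string) error {
	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, rule := range out.ReplicationConfiguration.Rules {
		dm := "unset"
		if rule.DeleteMarkerReplication != nil {
			dm = aws.StringValue(rule.DeleteMarkerReplication.Status)
		}
		fmt.Printf("rule %s: priority=%d deleteMarkers=%s\n",
			aws.StringValue(rule.ID), aws.Int64Value(rule.Priority), dm)
	}
	return nil
}
```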
The response @@ -4408,7 +4408,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // For more information about hosting websites, see Hosting Websites on Amazon // S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). // -// This GET operation requires the S3:GetBucketWebsite permission. By default, +// This GET action requires the S3:GetBucketWebsite permission. By default, // only the bucket owner can read the bucket website configuration. However, // bucket owners can allow other users to read the website configuration by // writing a bucket policy granting them the S3:GetBucketWebsite permission. @@ -4518,7 +4518,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering // Deep Archive tiers, before you can retrieve the object you must first restore // a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// Otherwise, this operation returns an InvalidObjectStateError error. For information +// Otherwise, this action returns an InvalidObjectStateError error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // // Encryption request headers, like x-amz-server-side-encryption, should not @@ -4561,8 +4561,8 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // Versioning // -// By default, the GET operation returns the current version of an object. To -// return a different version, use the versionId subresource. +// By default, the GET action returns the current version of an object. To return +// a different version, use the versionId subresource. // // If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the @@ -5029,7 +5029,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // subresource associated with the object. // // To use this operation, you must have permission to perform the s3:GetObjectTagging -// action. By default, the GET operation returns information about current version +// action. By default, the GET action returns information about current version // of an object. For a versioned bucket, you can have multiple versions of an // object in your bucket. To retrieve tags of any other version, use the versionId // query parameter. You also need permission for the s3:GetObjectVersionTagging @@ -5041,7 +5041,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // For information about the Amazon S3 object tagging feature, see Object Tagging // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). // -// The following operation is related to GetObjectTagging: +// The following action is related to GetObjectTagging: // // * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // @@ -5131,7 +5131,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // This action is not supported by Amazon S3 on Outposts. 
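The version-aware tagging behavior described in the GetObjectTagging section above (the versionId query parameter, gated on s3:GetObjectVersionTagging rather than s3:GetObjectTagging) in sketch form:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// tagsForVersion reads the tag set of a specific, possibly non-current object
// version. This requires the s3:GetObjectVersionTagging permission.
func tagsForVersion(svc *s3.S3, bucket, key, versionID string) (map[string]string, error) {
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
	})
	if err != nil {
		return nil, err
	}
	tags := make(map[string]string, len(out.TagSet))
	for _, t := range out.TagSet {
		tags[aws.StringValue(t.Key)] = aws.StringValue(t.Value)
	}
	return tags, nil
}
```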
// -// The following operation is related to GetObjectTorrent: +// The following action is related to GetObjectTorrent: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -5305,9 +5305,9 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // HeadBucket API operation for Amazon Simple Storage Service. // -// This operation is useful to determine if a bucket exists and you have permission -// to access it. The operation returns a 200 OK if the bucket exists and you -// have permission to access it. +// This action is useful to determine if a bucket exists and you have permission +// to access it. The action returns a 200 OK if the bucket exists and you have +// permission to access it. // // If the bucket does not exist or you do not have permission to access it, // the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A @@ -5397,15 +5397,15 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // HeadObject API operation for Amazon Simple Storage Service. // -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. +// The HEAD action retrieves metadata from an object without returning the object +// itself. This action is useful if you're only interested in an object's metadata. +// To use HEAD, you must have READ access to the object. // -// A HEAD request has the same options as a GET operation on an object. The -// response is identical to the GET response except that there is no response -// body. Because of this, if the HEAD request generates an error, it returns -// a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve -// the exact exception beyond these error codes. +// A HEAD request has the same options as a GET action on an object. The response +// is identical to the GET response except that there is no response body. Because +// of this, if the HEAD request generates an error, it returns a generic 404 +// Not Found or 403 Forbidden code. It is not possible to retrieve the exact +// exception beyond these error codes. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -5459,7 +5459,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // * If you don’t have the s3:ListBucket permission, Amazon S3 returns // an HTTP status code 403 ("access denied") error. // -// The following operation is related to HeadObject: +// The following action is related to HeadObject: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -5541,13 +5541,13 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // Lists the analytics configurations for the bucket. You can have up to 1,000 // analytics configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. You should always check the IsTruncated element -// in the response. If there are no more configurations to list, IsTruncated -// is set to false. If there are more configurations to list, IsTruncated is -// set to true, and there will be a value in NextContinuationToken. 
You use -// the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. +// This action supports list pagination and does not return more than 100 configurations +// at a time. You should always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there will be a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner @@ -5740,12 +5740,12 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // Returns a list of inventory configurations for the bucket. You can have up // to 1,000 analytics configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token // in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration @@ -5841,12 +5841,12 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // are only for the request metrics of the bucket and do not provide information // on daily storage metrics. You can have up to 1,000 configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token // in the request to GET the next page. 
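All three List*Configurations actions above share the same 100-per-page IsTruncated / NextContinuationToken protocol, so one sketch for the metrics listing covers the pattern:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAllMetricsConfigs walks the paginated listing: pass the previous
// NextContinuationToken back as ContinuationToken until IsTruncated is false.
func listAllMetricsConfigs(svc *s3.S3, bucket string) error {
	var token *string
	for {
		out, err := svc.ListBucketMetricsConfigurations(&s3.ListBucketMetricsConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return err
		}
		for _, cfg := range out.MetricsConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id))
		}
		if !aws.BoolValue(out.IsTruncated) {
			return nil
		}
		token = out.NextContinuationToken
	}
}
```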
// // To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration @@ -6018,11 +6018,11 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This operation lists in-progress multipart uploads. An in-progress multipart +// This action lists in-progress multipart uploads. An in-progress multipart // upload is a multipart upload that has been initiated using the Initiate Multipart // Upload request, but has not yet been completed or aborted. // -// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// This action returns at most 1,000 multipart uploads in the response. 1,000 // multipart uploads is the maximum number of uploads a response can include, // which is also the default value. You can further limit the number of uploads // in a response by specifying the max-uploads parameter in the response. If @@ -6039,7 +6039,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to ListMultipartUploads: // @@ -6340,8 +6340,8 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // to design your application to parse the contents of the response and handle // it appropriately. // -// This API has been revised. We recommend that you use the newer version, ListObjectsV2 -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// This action has been revised. We recommend that you use the newer version, +// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), // when developing applications. For backward compatibility, Amazon S3 continues // to support ListObjects. // @@ -6501,14 +6501,14 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // // To use this operation, you must have READ access to the bucket. // -// To use this operation in an AWS Identity and Access Management (IAM) policy, +// To use this action in an AWS Identity and Access Management (IAM) policy, // you must have permissions to perform the s3:ListBucket action. The bucket // owner has this permission by default and can grant this permission to others. // For more information about permissions, see Permissions Related to Bucket // Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // -// This section describes the latest revision of the API. We recommend that +// This section describes the latest revision of this action. We recommend that // you use this revised API for application development. For backward compatibility, // Amazon S3 continues to support the prior version of this API, ListObjects // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). 
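The recommended ListObjectsV2 path, with the SDK paginator hiding the continuation-token handling; bucket and prefix are placeholders:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listPrefix prints every key under a prefix using the revised V2 listing.
func listPrefix(svc *s3.S3, bucket, prefix string) error {
	return svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true // keep paging
	})
}
```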
@@ -6673,7 +6673,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to ListParts: // @@ -6830,7 +6830,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // * Suspended – Disables accelerated data transfers to the bucket. // // The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// operation returns the transfer acceleration state of a bucket. +// action returns the transfer acceleration state of a bucket. // // After setting the Transfer Acceleration state of a bucket to Enabled, it // might take up to thirty minutes before the data transfer rates to the bucket @@ -7242,7 +7242,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // For more information about CORS, go to Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. +// Simple Storage Service User Guide. // // Related Resources // @@ -7329,7 +7329,7 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // PutBucketEncryption API operation for Amazon Simple Storage Service. // -// This operation uses the encryption subresource to configure default encryption +// This action uses the encryption subresource to configure default encryption // and Amazon S3 Bucket Key for an existing bucket. // // Default encryption for a bucket can use server-side encryption with Amazon @@ -7337,19 +7337,19 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // specify default encryption using SSE-KMS, you can also configure Amazon S3 // Bucket Key. For information about default encryption, see Amazon S3 default // bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. For more information -// about S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. For more information about +// S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service User Guide. // -// This operation requires AWS Signature Version 4. For more information, see -// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// This action requires AWS Signature Version 4. For more information, see Authenticating +// Requests (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. 
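Default encryption with SSE-KMS plus an S3 Bucket Key, as discussed in the PutBucketEncryption section above; the KMS key ID is a caller-supplied placeholder:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableDefaultKMSEncryption sets SSE-KMS default encryption and turns on an
// S3 Bucket Key to reduce the number of KMS requests the bucket generates.
func enableDefaultKMSEncryption(svc *s3.S3, bucket, kmsKeyID string) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String(kmsKeyID),
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	return err
}
```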
For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources // @@ -7547,9 +7547,9 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the PUT operation adds an inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. +// This implementation of the PUT action adds an inventory configuration (identified +// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations +// per bucket. // // Amazon S3 inventory generates inventories of the objects in the bucket on // a daily or weekly basis, and the results are published to a flat file. The @@ -7562,7 +7562,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // the inventory daily or weekly. You can also configure what object metadata // to include and whether to inventory all object versions or only current versions. // For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // You must create a bucket policy on the destination bucket to grant permissions // to Amazon S3 to write objects to the bucket in the defined location. For @@ -7574,7 +7574,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // permission to others. For more information about permissions, see Permissions // Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Special Errors // @@ -7686,7 +7686,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. For information about lifecycle configuration, see // Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // By default, all Amazon S3 resources, including buckets, objects, and related // subresources (for example, lifecycle configuration and website configuration) @@ -7708,7 +7708,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // // For more information about permissions, see Managing Access Permissions to // your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. 
+// in the Amazon Simple Storage Service User Guide. // // For more examples of transitioning objects to storage classes such as STANDARD_IA // or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). @@ -7725,7 +7725,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // the AWS account that created the bucket—can perform any of the operations. // A resource owner can also grant others permission to perform the operation. // For more information, see the following topics in the Amazon Simple Storage -// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Service User Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8277,8 +8277,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // // -// This operation replaces the existing notification configuration with the -// configuration you include in the request body. +// This action replaces the existing notification configuration with the configuration +// you include in the request body. // // After Amazon S3 receives this request, it first verifies that any Amazon // Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon @@ -8298,8 +8298,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // The PUT notification is an atomic operation. For example, suppose your notification // configuration includes SNS topic, SQS queue, and Lambda function configurations. // When you send a PUT request with this configuration, Amazon S3 sends test -// messages to your SNS topic. If the message fails, the entire PUT operation -// will fail, and Amazon S3 will not add the configuration to your bucket. +// messages to your SNS topic. If the message fails, the entire PUT action will +// fail, and Amazon S3 will not add the configuration to your bucket. // // Responses // @@ -8308,7 +8308,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // will also include the x-amz-sns-test-message-id header containing the message // ID of the test notification sent to the topic. // -// The following operation is related to PutBucketNotificationConfiguration: +// The following action is related to PutBucketNotificationConfiguration: // // * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // @@ -8584,8 +8584,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // -// To perform this operation, the user or role performing the operation must -// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) // permission. 
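By way of illustration, here is a minimal aws-sdk-go sketch of such a replication request. The bucket names and the IAM role ARN are placeholders, and the role is assumed to already exist with a trust policy that lets Amazon S3 assume it; this is a sketch, not the canonical usage:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// The caller needs iam:PassRole on the role named below, as noted above.
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("my-source-bucket"), // placeholder
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::123456789012:role/replication-role"), // placeholder
			Rules: []*s3.ReplicationRule{{
				Status: aws.String(s3.ReplicationRuleStatusEnabled),
				Prefix: aws.String(""), // V1 schema: replicate the whole bucket
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::my-destination-bucket"), // placeholder
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("PutBucketReplication failed:", err)
	}
}

The V1-schema Prefix field keeps the sketch short; newer configurations would use Filter together with Priority and DeleteMarkerReplication instead.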
// // Specify the replication configuration in the request body. In the replication @@ -8833,7 +8833,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // match the schema. // // * Error code: OperationAbortedError Description: A conflicting conditional -// operation is currently in progress against this resource. Please try again. +// action is currently in progress against this resource. Please try again. // // * Error code: InternalError Description: The service was unable to apply // the provided tag to the bucket. @@ -9040,7 +9040,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // document and any redirect rules. For more information, see Hosting Websites // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). // -// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// This PUT action requires the S3:PutBucketWebsite permission. By default, // only the bucket owner can configure the website attached to a bucket; however, // bucket owners can allow other users to set the website configuration by writing // a bucket policy that grants them the S3:PutBucketWebsite permission. @@ -9099,7 +9099,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // Amazon S3 has a limitation of 50 routing rules per website configuration. // If you require more than 50 routing rules, you can use object redirect. For // more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9206,7 +9206,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // If you request server-side encryption using AWS Key Management Service (SSE-KMS), // you can enable an S3 Bucket Key at the object-level. For more information, // see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -9325,7 +9325,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // for a new or existing object in an S3 bucket. You must have WRITE_ACP permission // to set the ACL of an object. For more information, see What permissions can // I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // This action is not supported by Amazon S3 on Outposts. // @@ -9773,7 +9773,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // * Code: MalformedXMLError Cause: The XML provided does not match the schema. // -// * Code: OperationAbortedError Cause: A conflicting conditional operation +// * Code: OperationAbortedError Cause: A conflicting conditional action // is currently in progress against this resource. Please try again. 
// // * Code: InternalError Cause: The service was unable to apply the provided @@ -9974,7 +9974,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // permission to others. For more information about permissions, see Permissions // Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Querying Archives with Select Requests // @@ -9984,7 +9984,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // queries and custom analytics on your archived data without having to restore // your data to a hotter Amazon S3 tier. For an overview about select requests, // see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // When making a select request, do the following: // @@ -9995,13 +9995,13 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the storage class and encryption for the output objects stored in the // bucket. For more information about output, see Querying Archived Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. For more information +// in the Amazon Simple Storage Service User Guide. For more information // about the S3 structure in the request body, see the following: PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing // Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon Simple Storage Service Developer Guide Protecting Data Using +// in the Amazon Simple Storage Service User Guide Protecting Data Using // Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide +// in the Amazon Simple Storage Service User Guide // // * Define the SQL expression for the SELECT type of restoration for your // query in the request body's SelectParameters structure. You can use expressions @@ -10017,7 +10017,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // For more information about using SQL with S3 Glacier Select restore, see // SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // When making a select request, you can also do the following: // @@ -10088,19 +10088,19 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. 
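To make the retrieval-tier choice concrete, here is a minimal sketch of a restore request with aws-sdk-go, assuming the same aws and s3 imports used throughout this package; the bucket name, key, and two-day lifetime are placeholders:

// restoreExpedited requests an Expedited retrieval of an archived object.
func restoreExpedited(svc *s3.S3) error {
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("my-archive-bucket"), // placeholder
		Key:    aws.String("reports/2020.csv"),  // placeholder
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2), // how long Amazon S3 keeps the restored copy
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierExpedited), // or s3.TierStandard / s3.TierBulk
			},
		},
	})
	return err
}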
// // You can use Amazon S3 restore speed upgrade to change the restore speed to // a faster speed while it is in progress. For more information, see Upgrading // the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // To get the status of object restoration, you can send a HEAD request. Operations // return the x-amz-restore header, which provides information about the restoration // status, in the response. You can use Amazon S3 event notifications to notify // you when a restore is initiated or completed. For more information, see Configuring // Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // After restoring an archived object, you can update the restoration period // by reissuing the request with a new period. Amazon S3 updates the restoration @@ -10115,11 +10115,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the object in 3 days. For more information about lifecycle configuration, // see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Responses // -// A successful operation returns either the 200 OK or 202 Accepted status code. +// A successful action returns either the 200 OK or 202 Accepted status code. // // * If the object is not previously restored, then Amazon S3 returns 202 // Accepted in the response. @@ -10146,7 +10146,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // // * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide +// in the Amazon Simple Storage Service User Guide // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10157,7 +10157,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Returned Error Codes: // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier. +// This action is not allowed against this storage tier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { @@ -10234,7 +10234,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SelectObjectContent API operation for Amazon Simple Storage Service. // -// This operation filters the contents of an Amazon S3 object based on a simple +// This action filters the contents of an Amazon S3 object based on a simple // structured query language (SQL) statement.
In the request, along with the // SQL expression, you must also specify a data serialization format (JSON, // CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse @@ -10246,18 +10246,18 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // For more information about Amazon S3 Select, see Selecting Content from Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // For more information about using SQL with Amazon S3 Select, see SQL Reference // for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Permissions // // You must have s3:GetObject permission for this operation. Amazon S3 Select // does not support anonymous access. For more information about permissions, // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Object Data Formats // @@ -10280,13 +10280,13 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon Simple Storage Service Developer Guide. For objects that -// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer +// in the Amazon Simple Storage Service User Guide. For objects that are +// encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer // master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side // encryption is handled transparently, so you don't need to specify anything. // For more information about server-side encryption, including SSE-S3 and // SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Working with the Response Body // @@ -10297,8 +10297,8 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // GetObject Support // -// The SelectObjectContent operation does not support the following GetObject -// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// The SelectObjectContent action does not support the following GetObject functionality. +// For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). 
// // * Range: Although you can specify a scan range for an Amazon S3 Select // request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) @@ -10308,7 +10308,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // * GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot // specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. // For more information about storage classes, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Special Errors // @@ -10601,11 +10601,11 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // For more information on multipart uploads, go to Multipart Upload Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the -// Amazon Simple Storage Service Developer Guide . +// Amazon Simple Storage Service User Guide. // // For information on the permissions required to use the multipart upload API, -// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. +// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service User Guide. // // You can optionally request server-side encryption where Amazon S3 encrypts // your data as it writes it to disks in its data centers and decrypts it for @@ -10615,7 +10615,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // match the headers you used in the request to initiate the upload by using // CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). // For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Server-side encryption is supported by the S3 Multipart Upload actions. Unless // you are using a customer-provided encryption key, you don't need to specify @@ -10731,10 +10731,10 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // The minimum allowable part size for a multipart upload is 5 MB. For more // information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Instead of using an existing object as part data, you might use the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action // and provide data in your request. // // You must initiate a multipart upload before you can upload any part.
In response @@ -10745,15 +10745,15 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // * For conceptual information about multipart uploads, see Uploading Objects // Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // * For information about permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service User Guide. // -// * For information about copying objects using a single atomic operation -// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon Simple Storage Service Developer Guide. +// * For information about copying objects using a single atomic action vs. +// the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon Simple Storage Service User Guide. // // * For information about using server-side encryption with customer-provided // encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) @@ -10876,17 +10876,17 @@ type AbortMultipartUploadInput struct { // The bucket name to which the upload was taking place. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. 
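For illustration, a small sketch of supplying an access point ARN as the Bucket member when aborting an upload, as described above; the ARN, key, and upload ID are placeholders, and the aws and s3 imports are assumed as elsewhere in this package:

// abortViaAccessPoint aborts a multipart upload addressed through an access point.
func abortViaAccessPoint(svc *s3.S3, uploadID string) error {
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"), // placeholder ARN
		Key:      aws.String("large-object.bin"), // placeholder
		UploadId: aws.String(uploadID),
	})
	return err
}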
@@ -12161,17 +12161,17 @@ type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -12447,17 +12447,17 @@ type CopyObjectInput struct { // The name of the destination bucket. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -12470,8 +12470,8 @@ type CopyObjectInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a COPY operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a COPY action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. @@ -13364,17 +13364,17 @@ type CreateMultipartUploadInput struct { // The name of the bucket to which to initiate the upload // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -13387,7 +13387,7 @@ type CreateMultipartUploadInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with an object operation doesn’t affect bucket-level + // Specifying this header with an object action doesn’t affect bucket-level // settings for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` @@ -13485,7 +13485,7 @@ type CreateMultipartUploadInput struct { // object encryption. 
All GET and PUT requests for an object protected by AWS // KMS will fail if not made via SSL or using SigV4. For information about configuring // using any of the officially supported AWS SDKs and AWS CLI, see Specifying - // the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` @@ -13778,17 +13778,17 @@ type CreateMultipartUploadOutput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -15476,17 +15476,17 @@ type DeleteObjectInput struct { // The bucket name of the bucket containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -15682,17 +15682,17 @@ type DeleteObjectTaggingInput struct { // The bucket name containing the objects from which to remove the tags. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -15833,17 +15833,17 @@ type DeleteObjectsInput struct { // The bucket name containing the objects to delete. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. 
For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -15991,7 +15991,7 @@ type DeleteObjectsOutput struct { // was successfully deleted. Deleted []*DeletedObject `type:"list" flattened:"true"` - // Container for a failed delete operation that describes the object that Amazon + // Container for a failed delete action that describes the object that Amazon // S3 attempted to delete and the error it encountered. Errors []*Error `locationName:"Error" type:"list" flattened:"true"` @@ -16462,9 +16462,9 @@ type Error struct { // Forbidden SOAP Fault Code Prefix: Client // // * Code: AccountProblem Description: There is a problem with your AWS account - // that prevents the operation from completing successfully. Contact AWS - // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault - // Code Prefix: Client + // that prevents the action from completing successfully. Contact AWS Support + // for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault Code + // Prefix: Client // // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource // has been disabled. Contact AWS Support for further assistance. HTTP Status @@ -16567,9 +16567,9 @@ type Error struct { // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client // - // * Code: InvalidObjectState Description: The operation is not valid for - // the current state of the object. HTTP Status Code: 403 Forbidden SOAP - // Fault Code Prefix: Client + // * Code: InvalidObjectState Description: The action is not valid for the + // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client // // * Code: InvalidPart Description: One or more of the specified parts could // not be found. 
The part might not have been uploaded, or the specified @@ -16734,7 +16734,7 @@ type Error struct { // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status // Code: 403 Forbidden SOAP Fault Code Prefix: Client // - // * Code: OperationAborted Description: A conflicting conditional operation + // * Code: OperationAborted Description: A conflicting conditional action // is currently in progress against this resource. Try again. HTTP Status // Code: 409 Conflict SOAP Fault Code Prefix: Client // @@ -19353,11 +19353,11 @@ type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -19527,17 +19527,17 @@ type GetObjectInput struct { // The bucket name containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. 
For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -19827,11 +19827,11 @@ type GetObjectLegalHoldInput struct { // The bucket name containing the object whose Legal Hold status you want to // retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -19982,11 +19982,11 @@ type GetObjectLockConfigurationInput struct { // The bucket whose Object Lock configuration you want to retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -20183,7 +20183,7 @@ type GetObjectOutput struct { // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Provides information about object restoration operation and expiration time + // Provides information about object restoration action and expiration time // of the restored object copy. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` @@ -20430,11 +20430,11 @@ type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to // retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -20585,17 +20585,17 @@ type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -21205,17 +21205,17 @@ type HeadBucketInput struct { // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -21320,17 +21320,17 @@ type HeadObjectInput struct { // The name of the bucket containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -23849,17 +23849,17 @@ type ListMultipartUploadsInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -24206,7 +24206,7 @@ type ListObjectVersionsInput struct { KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. If additional keys satisfy the search criteria, // but were not returned because max-keys was exceeded, the response contains //