diff --git a/.changes/2.16.14.json b/.changes/2.16.14.json new file mode 100644 index 000000000000..7291a0826738 --- /dev/null +++ b/.changes/2.16.14.json @@ -0,0 +1,42 @@ +{ + "version": "2.16.14", + "date": "2021-03-08", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Amazon S3 Documentation updates" + }, + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "Documentation updates for Amazon S3" + }, + { + "type": "feature", + "category": "Amazon Kinesis Video Streams Archived Media", + "contributor": "", + "description": "Increase the maximum HLS and MPEG-DASH manifest size from 1,000 to 5,000 fragments." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Documentation updates for Lambda. Constraint updates to AddLayerVersionPermission's Action and OrganizationId parameters, and AddPermission's Principal and SourceAccount parameters." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Documentation updates for autoscaling for capacity-optimized-prioritized SpotAllocationStrategy" + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "contributor": "", + "description": "Amazon EMR customers can now specify how EC2 On-Demand Capacity Reservations are used in their EMR clusters with instance fleets using allocation strategy." + } + ] +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index b71e32bd86ba..a004a255cfc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +# __2.16.14__ __2021-03-08__ +## __AWS Lambda__ + - ### Features + - Documentation updates for Lambda. Constraint updates to AddLayerVersionPermission's Action and OrganizationId parameters, and AddPermission's Principal and SourceAccount parameters. + +## __AWS S3 Control__ + - ### Features + - Documentation updates for Amazon S3 + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR customers can now specify how EC2 On-Demand Capacity Reservations are used in their EMR clusters with instance fleets using allocation strategy. + +## __Amazon Kinesis Video Streams Archived Media__ + - ### Features + - Increase the maximum HLS and MPEG-DASH manifest size from 1,000 to 5,000 fragments. + +## __Amazon Simple Storage Service__ + - ### Features + - Amazon S3 Documentation updates + +## __Auto Scaling__ + - ### Features + - Documentation updates for autoscaling for capacity-optimized-prioritized SpotAllocationStrategy + # __2.16.13__ __2021-03-05__ ## __AWS CodePipeline__ - ### Features diff --git a/README.md b/README.md index 3c69bbd3a2b5..6a941518031e 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.16.13 + 2.16.14 pom import @@ -83,12 +83,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.16.13 + 2.16.14 software.amazon.awssdk s3 - 2.16.13 + 2.16.14 ``` @@ -100,7 +100,7 @@ You can import the whole SDK into your project (includes *ALL* services). 
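For reference, a minimal sketch of consuming this release through the Maven BOM shown in the README snippet above; the coordinates are taken from that snippet, and project-specific details are omitted:

```xml
<dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>software.amazon.awssdk</groupId>
      <artifactId>bom</artifactId>
      <version>2.16.14</version>
      <type>pom</type>
      <scope>import</scope>
    </dependency>
  </dependencies>
</dependencyManagement>

<dependencies>
  <!-- Version is inherited from the BOM above -->
  <dependency>
    <groupId>software.amazon.awssdk</groupId>
    <artifactId>s3</artifactId>
  </dependency>
</dependencies>
```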
Please software.amazon.awssdk aws-sdk-java - 2.16.13 + 2.16.14 ``` diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index d61bdadce9b5..34300fa66bb4 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index a44a51c729c9..fb89ca058dc5 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 archetype-lambda diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index 4f556a7fb975..552e3150049f 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/archetypes/pom.xml b/archetypes/pom.xml index 9fe141de58d4..4a788d719026 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 6cdbdba12755..29b6b24c96af 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 ../pom.xml aws-sdk-java diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index 31775f45939c..24c10c01cd75 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/bom/pom.xml b/bom/pom.xml index 6f112531f72a..b1ff24d7b0a5 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 ../pom.xml bom diff --git a/bundle/pom.xml b/bundle/pom.xml index 631c3e309c7a..2c45d41757d9 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 5b276192da4c..fcdddc7a52e9 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index 2d66d726fc8a..9f7c5368d457 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index ed1d5cb4364d..1838af0f8ae6 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index 77ec54931205..d5f4d606365a 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 codegen AWS Java SDK :: Code Generator diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index 842bcdbd5dea..4077b17f81e4 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/arns/pom.xml 
b/core/arns/pom.xml index 8bff9918cc6b..cf9205e38d1c 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 1a35b9cd8dca..0f798d3c5383 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.16.13 + 2.16.14 auth diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index c604381f182e..757e5bc87b5c 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.16.13 + 2.16.14 aws-core diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index c8ea79724f83..b82d8d82bc1f 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index dd78a1251365..da9cacba6b6b 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index b5db38d9a0d4..4143041b9346 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.16.13 + 2.16.14 profiles diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index c9fca8096b66..81d91850139a 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/protocols/aws-ion-protocol/pom.xml b/core/protocols/aws-ion-protocol/pom.xml index 1387fb667577..692ef2f69126 100644 --- a/core/protocols/aws-ion-protocol/pom.xml +++ b/core/protocols/aws-ion-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index ded24e456d67..b71f96287e1f 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index b7b734d2f790..e343349e9b61 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index b2c7f39bfced..b25b4c536f0e 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index a43ebf44dea1..64e29958b902 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index 34f37cd4d437..9e499b3c2d91 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index 19761473257b..fe00cd9b50fc 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.16.13 
+ 2.16.14 regions diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index 1443622dc3fb..1194968cce53 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.16.13 + 2.16.14 sdk-core AWS Java SDK :: SDK Core diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index fda7cd31cf72..731048ce33c5 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 06d6617a231c..5a1a60c4c7b6 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.16.13 + 2.16.14 apache-client diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 57743063f586..4eb49dec0ab0 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index d1e355d19d34..0db59d98ece3 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/http-clients/pom.xml b/http-clients/pom.xml index f90735929c49..4cac7d2af2a4 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index 93faa5af1bc7..2aca40ae5976 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index 5dd21595257e..ad9887cc2d60 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.16.13 + 2.16.14 cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index bfb47dd2ffce..43407e2f7f52 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 metric-publishers diff --git a/pom.xml b/pom.xml index 8843c5949b61..c14c6f3d6254 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -85,7 +85,7 @@ ${project.version} - 2.16.12 + 2.16.13 2.12.1 2.12.1 2.11.2 diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index ec0225797da2..1f0bd7015549 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 ../pom.xml release-scripts diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index cd7cb7162c44..1826abd3481f 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services-custom - 2.16.13 + 2.16.14 dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/pom.xml b/services-custom/pom.xml index f33abec09386..4effafe47f87 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 services-custom AWS Java SDK :: Custom Services diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index aeab850f5416..b68d505b3c52 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 53e6d54c5f57..707f4cf94858 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 8450de999512..538a97457ee9 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index af225a085373..3d2eed8d5ccb 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 alexaforbusiness diff --git a/services/amp/pom.xml b/services/amp/pom.xml index bd78d826d1aa..765cf0bef066 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 amp AWS Java SDK :: Services :: Amp diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index ba9e0e2ef1b7..5b8ad0c3a76e 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index 74b24cb76f58..b9f5e89a92ec 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index 6377df2a5bf2..799e4def02c2 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index 402e41132a74..300469fbb671 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 21daf7a085b8..ebf5f538ac14 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 39ca29948875..89fb577a4f1a 100644 --- 
a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index 46c22188c531..0591e4d0a52a 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index cfabd12eac88..c5150b7c89cd 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 0db465d59f25..6b17a402ec32 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index fe34d49776cc..bc5a24b10967 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 39fb50cfb61e..9ebf7c6d75e8 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index ee0553f95b4f..5a4e00727af3 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index e0da0bc569fa..f4e882912ba8 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index 1a5ddcd5d99a..8492f615110d 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 appsync diff --git a/services/athena/pom.xml b/services/athena/pom.xml index 2c6c64d98bfd..ee52403b3f18 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index 5f204e3ebf47..6009c997cb71 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index 46d3c8b04013..0f7cb09f280a 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.16.13 + 2.16.14 autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index f40c7cbca5e1..89f68f31ba62 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -1531,7 +1531,7 @@ }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", - "documentation":"

An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.

The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the parameters that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

An embedded object that specifies a mixed instances policy. The required properties must be specified. If optional properties are unspecified, their default values are used.

The policy includes properties that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the properties that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" }, "InstanceId":{ "shape":"XmlStringMaxLen19", @@ -2628,7 +2628,7 @@ "members":{ "OnDemandAllocationStrategy":{ "shape":"XmlString", - "documentation":"

Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the overrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.

" + "documentation":"

Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the LaunchTemplateOverrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.

" }, "OnDemandBaseCapacity":{ "shape":"OnDemandBaseCapacity", @@ -2640,7 +2640,7 @@ }, "SpotAllocationStrategy":{ "shape":"XmlString", - "documentation":"

Indicates how to allocate instances across Spot Instance pools. If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified.

" + "documentation":"

Indicates how to allocate instances across Spot Instance pools.

If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified.

If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized and set the order of instance types in the list of launch template overrides from highest to lowest priority (from first to last in the list). Amazon EC2 Auto Scaling honors the instance type priorities on a best-effort basis but optimizes for capacity first.
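As a hedged illustration of the capacity-optimized-prioritized strategy, the sketch below uses the AWS SDK for Java v2 Auto Scaling client. The class and builder names follow the shapes in this model (MixedInstancesPolicy, InstancesDistribution, LaunchTemplateOverrides); the launch template name, instance types, and group name are placeholders.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.InstancesDistribution;
import software.amazon.awssdk.services.autoscaling.model.LaunchTemplate;
import software.amazon.awssdk.services.autoscaling.model.LaunchTemplateOverrides;
import software.amazon.awssdk.services.autoscaling.model.LaunchTemplateSpecification;
import software.amazon.awssdk.services.autoscaling.model.MixedInstancesPolicy;

public class CapacityOptimizedPrioritizedSketch {
    public static void main(String[] args) {
        AutoScalingClient autoScaling = AutoScalingClient.create();

        // Overrides are listed from highest to lowest priority; the
        // capacity-optimized-prioritized strategy honors that order on a
        // best-effort basis while optimizing for capacity first.
        MixedInstancesPolicy policy = MixedInstancesPolicy.builder()
                .launchTemplate(LaunchTemplate.builder()
                        .launchTemplateSpecification(LaunchTemplateSpecification.builder()
                                .launchTemplateName("my-launch-template") // placeholder
                                .version("$Latest")
                                .build())
                        .overrides(
                                LaunchTemplateOverrides.builder().instanceType("c5.large").build(),
                                LaunchTemplateOverrides.builder().instanceType("c5a.large").build(),
                                LaunchTemplateOverrides.builder().instanceType("m5.large").build())
                        .build())
                .instancesDistribution(InstancesDistribution.builder()
                        .onDemandBaseCapacity(1)
                        .onDemandPercentageAboveBaseCapacity(25)
                        .spotAllocationStrategy("capacity-optimized-prioritized")
                        .build())
                .build();

        autoScaling.updateAutoScalingGroup(b -> b
                .autoScalingGroupName("my-asg") // placeholder
                .mixedInstancesPolicy(policy));
    }
}
```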

" }, "SpotInstancePools":{ "shape":"SpotInstancePools", @@ -2828,10 +2828,10 @@ }, "Overrides":{ "shape":"Overrides", - "documentation":"

Any parameters that you specify override the same parameters in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.

" + "documentation":"

Any properties that you specify override the same properties in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.

" } }, - "documentation":"

Describes a launch template and overrides.

You specify these parameters as part of a mixed instances policy.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + "documentation":"

Describes a launch template and overrides.

You specify these properties as part of a mixed instances policy.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" }, "LaunchTemplateName":{ "type":"string", @@ -3151,10 +3151,10 @@ }, "InstancesDistribution":{ "shape":"InstancesDistribution", - "documentation":"

Specifies the instances distribution. If not provided, the value for each parameter in InstancesDistribution uses a default value.

" + "documentation":"

Specifies the instances distribution. If not provided, the value for each property in InstancesDistribution uses a default value.

" } }, - "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or launch template.

" + "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level property instead of a launch configuration or launch template.

" }, "MonitoringEnabled":{"type":"boolean"}, "NoDevice":{"type":"boolean"}, @@ -4079,7 +4079,7 @@ }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", - "documentation":"

An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional parameters are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional properties are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 225b216f5ddd..bcd228c35d18 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 3a88de002ec9..37a7e555e968 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 backup AWS Java SDK :: Services :: Backup diff --git a/services/batch/pom.xml b/services/batch/pom.xml index 9bebd847372d..8ce8bb9a0194 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/braket/pom.xml b/services/braket/pom.xml index 622d82d8ac15..fa25a71680e1 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 braket AWS Java SDK :: Services :: Braket diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index f203456c8eaa..8cd47c1093a4 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chime/pom.xml b/services/chime/pom.xml index 9a43d221134c..5d6f3dd946de 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 chime AWS Java SDK :: Services :: Chime diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index 6ef857baa67c..603a136420c5 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 cloud9 diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index 5bcec088bc22..13e672bbc33a 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 11bfb3d68e9b..d1f6928a8ddc 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index 6fdbb1595b5d..6bffe6a8a60f 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index f7524dd8e51f..f2c5da7f917f 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 6dddbf059379..712c3e7146f6 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 cloudhsmv2 diff --git 
a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index b7d83a4b3554..fe0456189ebb 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index e334381b36e3..0bb78535ce03 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index 79d20b76232d..af5fb67e458d 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index c63a315f3677..60e2b92703de 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index 081749813bb1..855a4faa7111 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index bdd909054845..42d497c5d64c 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 13b464da008d..12deac57cc0c 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 7eedfbc720a6..676d504544ae 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index 771f47e23739..d8461d07cdd9 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index b92a6d5264cf..108d1e586c6b 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index 9a12e950cc57..0e6631ee3229 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml 
b/services/codegurureviewer/pom.xml index 1631eb3f9ece..1795187f6183 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index ee61577d9bb4..d6b72e8ce2d2 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 5cf386085dd1..a1635dca9b21 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index e69e747b943d..81e3c34796db 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index d0c030b12881..f9ce601c302c 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index 186f5b245eb6..fb337a656948 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index 558b30b6e220..adc7664a9828 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index 7c9fc1b0a581..65ebac57c379 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index 085683d880fc..7177f82f1e07 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index 161e93f20315..7bf0610ce806 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index 42504fa5dafd..e1ed2d63bd5a 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 
computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/config/pom.xml b/services/config/pom.xml index f8c4890023a8..dd3e2e52ebad 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 config AWS Java SDK :: Services :: AWS Config diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 2b8947ce21b9..5ab9df70e7e7 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 connect AWS Java SDK :: Services :: Connect diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index f077dd6231ad..ef8445120652 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 832e7be8283a..681f0a80294e 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index 634b6f81c9fb..6c8794090c90 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index dbdd02eefdbd..fe7506cf6dba 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 costexplorer diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index eb000d5f7c4e..2fdc7a60bf60 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 094899811df8..ed44f44702db 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index 647d0d2c684a..f402294fb8da 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index 1097f7a8ff1d..291c689c8caf 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index a7268e698378..e53ac11e98ba 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 datapipeline AWS Java SDK :: Services :: AWS Data Pipeline 
diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index a876672c706f..80f58d28dc8f 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 datasync AWS Java SDK :: Services :: DataSync diff --git a/services/dax/pom.xml b/services/dax/pom.xml index 52761eb4425c..5c823fc76665 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index a5e0bbcfb2cc..0f003c36eb99 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 detective AWS Java SDK :: Services :: Detective diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index f669332de08f..15cbfdafe00b 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml index 71b691c3432f..9de9b17f02cb 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index db37bb0f1bd2..300d7f5a64a2 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directory/pom.xml b/services/directory/pom.xml index 72476a6c6154..072f7696deeb 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index 459362fd64d1..3f0ed78050bc 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index 6611bb42f9c8..85168132e6f2 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 docdb AWS Java SDK :: Services :: DocDB diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index c505185c9520..345bf1469847 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index b434d1001fb9..80c9994c5961 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index 32ddf586ea50..a9adce1658dc 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index 
3d2850c65988..f9c7a6bb3a0a 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index b722c6e84331..e80d3a0729fb 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index fe6150c8bf5c..d9b8448eae50 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 682d91d2df32..62ae59082281 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/efs/pom.xml b/services/efs/pom.xml index 078b797746d0..2f29b7e6aed8 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/eks/pom.xml b/services/eks/pom.xml index 17d21b84845e..ec56b72e7b2a 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 eks AWS Java SDK :: Services :: EKS diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index cf8db6fa31ec..493b7fc23cb0 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index 08223863e35a..71a7f7272dcf 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index 18496dbdc395..ee361b848ffc 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 01d91f069d54..a9ccb1c4cd7f 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index b1c94b314274..af1f112b557e 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 942840c5a64e..0a13dbd7ad14 100644 --- a/services/elasticsearch/pom.xml +++ 
b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index f87242d7898c..b19690a331b7 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index d449f80113d0..3c1f17d1edc8 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/service-2.json b/services/emr/src/main/resources/codegen-resources/service-2.json index 7c29d3605392..0e79329e2b9b 100644 --- a/services/emr/src/main/resources/codegen-resources/service-2.json +++ b/services/emr/src/main/resources/codegen-resources/service-2.json @@ -2322,7 +2322,7 @@ }, "BidPrice":{ "shape":"String", - "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD.
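A hedged sketch of the Spot usage described above, using the SDK's generated InstanceGroupConfig builder; the instance type and count are placeholders, and the BidPrice values follow the documentation wording.

```java
import software.amazon.awssdk.services.emr.model.InstanceGroupConfig;
import software.amazon.awssdk.services.emr.model.InstanceRoleType;
import software.amazon.awssdk.services.emr.model.MarketType;

public class SpotTaskGroupSketch {
    // Builds a task instance group that runs on Spot Instances and caps the
    // Spot price at the On-Demand price, per the BidPrice description above.
    static InstanceGroupConfig spotTaskGroup() {
        return InstanceGroupConfig.builder()
                .instanceRole(InstanceRoleType.TASK)
                .market(MarketType.SPOT)
                .instanceType("m5.xlarge")   // placeholder instance type
                .instanceCount(2)
                .bidPrice("OnDemandPrice")   // or a USD amount such as "0.50"
                .build();
    }
}
```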

" }, "InstanceType":{ "shape":"InstanceType", @@ -2397,7 +2397,7 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD.

" }, "InstanceType":{ "shape":"InstanceType", @@ -2456,7 +2456,7 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD.

" }, "InstanceType":{ "shape":"InstanceType", @@ -3526,7 +3526,7 @@ }, "StepConcurrencyLevel":{ "shape":"Integer", - "documentation":"

The number of steps that can be executed concurrently. You can specify a maximum of 256 steps.

" + "documentation":"

The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps.

" } } }, @@ -3685,6 +3685,31 @@ "type":"list", "member":{"shape":"NotebookExecutionSummary"} }, + "OnDemandCapacityReservationOptions":{ + "type":"structure", + "members":{ + "UsageStrategy":{ + "shape":"OnDemandCapacityReservationUsageStrategy", + "documentation":"

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price).

If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.

" + }, + "CapacityReservationPreference":{ + "shape":"OnDemandCapacityReservationPreference", + "documentation":"

Indicates the instance's Capacity Reservation preferences. Possible preferences include open (the instance can run in any open Capacity Reservation that has matching attributes) and none (the instance avoids running in a Capacity Reservation even if one is available).

" + } + }, + "documentation":"

Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.

" + }, + "OnDemandCapacityReservationPreference":{ + "type":"string", + "enum":[ + "open", + "none" + ] + }, + "OnDemandCapacityReservationUsageStrategy":{ + "type":"string", + "enum":["use-capacity-reservations-first"] + }, "OnDemandProvisioningAllocationStrategy":{ "type":"string", "enum":["lowest-price"] @@ -3695,7 +3720,11 @@ "members":{ "AllocationStrategy":{ "shape":"OnDemandProvisioningAllocationStrategy", - "documentation":"

Specifies the strategy to use in launching On-Demand Instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.

" + "documentation":"

Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.

" + }, + "CapacityReservationOptions":{ + "shape":"OnDemandCapacityReservationOptions", + "documentation":"

The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy.

" } }, "documentation":"

The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.
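As a hedged sketch of opting an instance fleet into this behavior with the AWS SDK for Java v2 EMR client: the class and builder names mirror the shapes above (OnDemandProvisioningSpecification, OnDemandCapacityReservationOptions); the cluster ID, instance type, and capacities are placeholders.

```java
import software.amazon.awssdk.services.emr.EmrClient;
import software.amazon.awssdk.services.emr.model.InstanceFleetConfig;
import software.amazon.awssdk.services.emr.model.InstanceFleetProvisioningSpecifications;
import software.amazon.awssdk.services.emr.model.InstanceFleetType;
import software.amazon.awssdk.services.emr.model.InstanceTypeConfig;
import software.amazon.awssdk.services.emr.model.OnDemandCapacityReservationOptions;
import software.amazon.awssdk.services.emr.model.OnDemandProvisioningSpecification;

public class EmrCapacityReservationSketch {
    public static void main(String[] args) {
        EmrClient emr = EmrClient.create();

        // Use any matching, unused Capacity Reservations first; remaining
        // On-Demand capacity falls back to the lowest-price strategy.
        OnDemandProvisioningSpecification onDemandSpec = OnDemandProvisioningSpecification.builder()
                .allocationStrategy("lowest-price")
                .capacityReservationOptions(OnDemandCapacityReservationOptions.builder()
                        .usageStrategy("use-capacity-reservations-first")
                        .capacityReservationPreference("open")
                        .build())
                .build();

        InstanceFleetConfig coreFleet = InstanceFleetConfig.builder()
                .instanceFleetType(InstanceFleetType.CORE)
                .targetOnDemandCapacity(4)                       // placeholder capacity
                .instanceTypeConfigs(InstanceTypeConfig.builder()
                        .instanceType("m5.xlarge")               // placeholder instance type
                        .build())
                .launchSpecifications(InstanceFleetProvisioningSpecifications.builder()
                        .onDemandSpecification(onDemandSpec)
                        .build())
                .build();

        emr.addInstanceFleet(b -> b
                .clusterId("j-XXXXXXXXXXXXX")                    // placeholder cluster ID
                .instanceFleet(coreFleet));
    }
}
```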

" diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index 0e4aced9cf22..fc6da33c0399 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 4e24e8450ea5..82e3c14aaba0 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 6ad65c8b5834..abadde0179b4 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/fms/pom.xml b/services/fms/pom.xml index 3ef3772c7049..6063c2f5a859 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 fms AWS Java SDK :: Services :: FMS diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index 2bfdad753647..69bd930a2d42 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index fa0a2a850ba3..86870703dd00 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 0cc059b42aa3..ce0799ee5b6f 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index a0ed67a52c85..e34433a11e29 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 fsx AWS Java SDK :: Services :: FSx diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 9072a09c4535..4621585261f7 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index b094d70c33bc..5069c7b714cf 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index 7a9d947c27a8..216bfee1c533 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/glue/pom.xml b/services/glue/pom.xml index 49a2c5c84170..18109435ad1c 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 glue diff --git 
a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 2843f7ce9227..d006289cfe7c 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index 82a78f5977ed..018e22e417f0 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index 4a0abab9820b..457c8460d42b 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index 46aff0ccc771..2ebd547685de 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 guardduty diff --git a/services/health/pom.xml b/services/health/pom.xml index bdac6ed025d8..3c824e6e640f 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index b859ccc477a9..3f1f5c384e85 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index 3c162dbfdcbc..f24f17abe9c9 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 4b13bc892472..8b618ce43aaf 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 1454bef1f54f..b5350f9da22a 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index 316e399490ec..34e0d022c945 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 8a4ca0447b04..67e744345457 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/iot/pom.xml b/services/iot/pom.xml index 8076f6619242..d21d1ff4e303 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iot AWS Java SDK :: Services :: AWS IoT diff --git 
a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index ebc34df7b31f..35d967ce0744 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index 5efe236566b0..c712bf3d90e1 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index 38db6baf36d8..1ec73c6242f6 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 33a1e2280884..8590d6b5e8e7 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index 3782a6073443..8603c22e5ea8 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index 444594b4de6a..209943ed10b1 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index 36446a6b7278..1aea924bf9ac 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index ded35874aa98..1f36ab7d886a 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index 8c9cd6a6bc93..6d49370268ef 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index ad8ef78a3275..6a88131ae79a 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 5eaee124318c..dad43139f003 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotsitewise AWS Java SDK :: Services :: Io T Site 
Wise diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index e9d4e9e6251d..2e34818c24ff 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index 34af59ba0e34..a87cc96ec871 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index 20c038a27729..4a80fc0b37a7 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ivs AWS Java SDK :: Services :: Ivs diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 1ef0d872612b..52c993e6448c 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index 7d2e5d868942..49ad81ebc15f 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 8609bb6a2cc1..821e6a5f5bda 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index 9014c355d0a3..6b69dfdd3d16 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index 61696a900d92..c10311709987 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 2e132620429f..5711f1ebbd93 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 kinesisvideo diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index 1a978fad5f8a..9c1f30e4f5b7 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json index b40ec7bbe345..564c813c3d3c 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json +++ b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json @@ -50,7 +50,7 @@ 
{"shape":"MissingCodecPrivateDataException"}, {"shape":"InvalidCodecPrivateDataException"} ], - "documentation":"

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

The following restrictions apply to MPEG-DASH sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of ten active MPEG-DASH streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active MPEG-DASH sessions does not count against the active GetMedia connection limit.

    The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).

    Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you use with your AWS credentials.

    The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to play back.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
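
A minimal sketch of requesting a live MPEG-DASH session URL with the Java client, assuming the data endpoint was already resolved through GetDataEndpoint (the endpoint URI and stream name are placeholders, and class and getter names follow the SDK's generated naming):

```java
import java.net.URI;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.KinesisVideoArchivedMediaClient;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.GetDashStreamingSessionUrlRequest;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.GetDashStreamingSessionUrlResponse;

public class DashStreamingSessionExample {
    public static void main(String[] args) {
        // Placeholder endpoint: resolve it with kinesisvideo:GetDataEndpoint,
        // APIName = GET_DASH_STREAMING_SESSION_URL.
        try (KinesisVideoArchivedMediaClient client = KinesisVideoArchivedMediaClient.builder()
                .endpointOverride(URI.create("https://example-data-endpoint.kinesisvideo.us-west-2.amazonaws.com"))
                .build()) {
            GetDashStreamingSessionUrlResponse response = client.getDASHStreamingSessionURL(
                    GetDashStreamingSessionUrlRequest.builder()
                            .streamName("my-stream")            // placeholder stream name
                            .playbackMode("LIVE")               // ON_DEMAND additionally requires a DASHFragmentSelector
                            .maxManifestFragmentResults(5000L)  // new DASHMaxResults ceiling; most useful with ON_DEMAND
                            .build());
            // Hand the authenticated manifest URL to an MPEG-DASH capable player.
            System.out.println(response.dashStreamingSessionURL());
        }
    }
}
```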

" }, "GetHLSStreamingSessionURL":{ "name":"GetHLSStreamingSessionURL", @@ -70,7 +70,7 @@ {"shape":"MissingCodecPrivateDataException"}, {"shape":"InvalidCodecPrivateDataException"} ], - "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

The following restrictions apply to HLS sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of ten active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia connection limit.

    The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

A streaming session URL must not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
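
A minimal sketch of the on-demand HLS flow, assuming a pre-resolved GET_HLS_STREAMING_SESSION_URL endpoint (the endpoint URI, stream name, and time window are placeholders; class names follow the SDK's generated naming):

```java
import java.net.URI;
import java.time.Duration;
import java.time.Instant;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.KinesisVideoArchivedMediaClient;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.GetHlsStreamingSessionUrlRequest;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.GetHlsStreamingSessionUrlResponse;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.HlsFragmentSelector;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.HlsTimestampRange;

public class HlsStreamingSessionExample {
    public static void main(String[] args) {
        Instant start = Instant.parse("2021-03-08T00:00:00Z"); // illustrative start of the archived window
        try (KinesisVideoArchivedMediaClient client = KinesisVideoArchivedMediaClient.builder()
                .endpointOverride(URI.create("https://example-data-endpoint.kinesisvideo.us-west-2.amazonaws.com"))
                .build()) {
            GetHlsStreamingSessionUrlResponse response = client.getHLSStreamingSessionURL(
                    GetHlsStreamingSessionUrlRequest.builder()
                            .streamName("my-stream")                 // placeholder stream name
                            .playbackMode("ON_DEMAND")
                            .containerFormat("FRAGMENTED_MP4")
                            .hlsFragmentSelector(HlsFragmentSelector.builder()
                                    .fragmentSelectorType("PRODUCER_TIMESTAMP")
                                    .timestampRange(HlsTimestampRange.builder()
                                            .startTimestamp(start)
                                            .endTimestamp(start.plus(Duration.ofHours(1)))
                                            .build())
                                    .build())
                            .maxMediaPlaylistFragmentResults(5000L)  // new HLSMaxResults ceiling
                            .build());
            // Hand the authenticated master playlist URL to an HLS-capable player.
            System.out.println(response.hlsStreamingSessionURL());
        }
    }
}
```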

" }, "GetMediaForFragmentList":{ "name":"GetMediaForFragmentList", @@ -86,7 +86,7 @@ {"shape":"ClientLimitExceededException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

The following limits apply when using the GetMediaForFragmentList API:

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

For limits, see Kinesis Video Streams Limits.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
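
A minimal sketch of that flow, assuming the GET_MEDIA_FOR_FRAGMENT_LIST endpoint has already been obtained from GetDataEndpoint (the endpoint URI, stream name, and fragment number are placeholders; the file overload is one of the generated streaming-response conveniences):

```java
import java.net.URI;
import java.nio.file.Paths;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.KinesisVideoArchivedMediaClient;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.GetMediaForFragmentListRequest;

public class GetMediaForFragmentListExample {
    public static void main(String[] args) {
        // Placeholder endpoint: resolve it with kinesisvideo:GetDataEndpoint,
        // APIName = GET_MEDIA_FOR_FRAGMENT_LIST.
        try (KinesisVideoArchivedMediaClient client = KinesisVideoArchivedMediaClient.builder()
                .endpointOverride(URI.create("https://example-data-endpoint.kinesisvideo.us-west-2.amazonaws.com"))
                .build()) {
            client.getMediaForFragmentList(
                    GetMediaForFragmentListRequest.builder()
                            .streamName("my-stream")                     // placeholder stream name
                            .fragments("91343852333181432392682062624")  // placeholder fragment number
                            .build(),
                    Paths.get("fragments.mkv"));                         // write the MKV payload to a local file
        }
    }
}
```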

" }, "ListFragments":{ "name":"ListFragments", @@ -111,7 +111,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client calls. Try making the call later.

", + "documentation":"

Kinesis Video Streams has throttled the request because you have exceeded a limit. Try making the call later. For information about limits, see Kinesis Video Streams Limits.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -149,14 +149,14 @@ "members":{ "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

The starting timestamp in the range of timestamps for which to return fragments.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + "documentation":"

The starting timestamp in the range of timestamps for which to return fragments.

Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The end of the timestamp range for the requested media.

This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + "documentation":"

The end of the timestamp range for the requested media.

This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" } }, - "documentation":"

The range of timestamps for which to return fragments.

The values in the ClipTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + "documentation":"

The range of timestamps for which to return fragments.

" }, "ContainerFormat":{ "type":"string", @@ -206,6 +206,11 @@ "SERVER_TIMESTAMP" ] }, + "DASHMaxResults":{ + "type":"long", + "max":5000, + "min":1 + }, "DASHPlaybackMode":{ "type":"string", "enum":[ @@ -220,14 +225,14 @@ "members":{ "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

The start of the timestamp range for the requested media.

If the DASHTimestampRange value is specified, the StartTimestamp value is required.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + "documentation":"

The start of the timestamp range for the requested media.

If the DASHTimestampRange value is specified, the StartTimestamp value is required.

Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + "documentation":"

The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" } }, - "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

The values in the DASHimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

The values in DASHTimestampRange are inclusive. Fragments that start exactly at or after the start time are included in the session. Fragments that start before the start time and continue past it are not included in the session.
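
A minimal sketch of a selector that follows these rules, using the builders generated from these shapes (the timestamps are placeholders); the result would be passed as the DASHFragmentSelector on an ON_DEMAND or LIVE_REPLAY GetDASHStreamingSessionURL request:

```java
import java.time.Duration;
import java.time.Instant;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.DashFragmentSelector;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.DashTimestampRange;

public class DashTimestampRangeExample {
    public static void main(String[] args) {
        Instant start = Instant.parse("2021-03-08T00:00:00Z");   // inclusive start; placeholder value
        DashFragmentSelector selector = DashFragmentSelector.builder()
                .fragmentSelectorType("PRODUCER_TIMESTAMP")
                .timestampRange(DashTimestampRange.builder()
                        .startTimestamp(start)
                        // EndTimestamp must be later than StartTimestamp and within 24 hours of it.
                        .endTimestamp(start.plus(Duration.ofHours(2)))
                        .build())
                .build();
        System.out.println(selector);
    }
}
```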

" }, "ErrorMessage":{"type":"string"}, "Expires":{ @@ -349,7 +354,7 @@ }, "PlaybackMode":{ "shape":"DASHPlaybackMode", - "documentation":"

Whether to retrieve live, live replay, or archived, on-demand data.

Features of the three types of sessions include the following:

In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.

The default is LIVE.

" + "documentation":"

Whether to retrieve live, live replay, or archived, on-demand data.

Features of the three types of sessions include the following:

In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.

The default is LIVE.

" }, "DisplayFragmentTimestamp":{ "shape":"DASHDisplayFragmentTimestamp", @@ -368,7 +373,7 @@ "documentation":"

The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).

When a session expires, no new calls to GetDashManifest, GetMP4InitFragment, or GetMP4MediaFragment can be made for that session.

The default is 300 (5 minutes).

" }, "MaxManifestFragmentResults":{ - "shape":"PageLimit", + "shape":"DASHMaxResults", "documentation":"

The maximum number of fragments that are returned in the MPEG-DASH manifest.

When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.

When there are a higher number of fragments available in a live MPEG-DASH manifest, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live MPEG-DASH manifest have a minimum of 3 fragments and a maximum of 10 fragments.

The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.

The maximum value of 5,000 fragments corresponds to more than 80 minutes of video on streams with 1-second fragments, and more than 13 hours of video on streams with 10-second fragments.

" } } @@ -395,7 +400,7 @@ }, "PlaybackMode":{ "shape":"HLSPlaybackMode", - "documentation":"

Whether to retrieve live, live replay, or archived, on-demand data.

Features of the three types of sessions include the following:

In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.

The default is LIVE.

" + "documentation":"

Whether to retrieve live, live replay, or archived, on-demand data.

Features of the three types of sessions include the following:

In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the largest fragment number (that is, the newest fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.

The default is LIVE.

" }, "HLSFragmentSelector":{ "shape":"HLSFragmentSelector", @@ -418,8 +423,8 @@ "documentation":"

The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).

When a session expires, no new calls to GetHLSMasterPlaylist, GetHLSMediaPlaylist, GetMP4InitFragment, GetMP4MediaFragment, or GetTSFragment can be made for that session.

The default is 300 (5 minutes).

" }, "MaxMediaPlaylistFragmentResults":{ - "shape":"PageLimit", - "documentation":"

The maximum number of fragments that are returned in the HLS media playlists.

When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.

When there are a higher number of fragments available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.

The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.

The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.

" + "shape":"HLSMaxResults", + "documentation":"

The maximum number of fragments that are returned in the HLS media playlists.

When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.

When there are a higher number of fragments available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.

The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.

The maximum value of 5,000 fragments corresponds to more than 80 minutes of video on streams with 1-second fragments, and more than 13 hours of video on streams with 10-second fragments.

" } } }, @@ -502,6 +507,11 @@ "SERVER_TIMESTAMP" ] }, + "HLSMaxResults":{ + "type":"long", + "max":5000, + "min":1 + }, "HLSPlaybackMode":{ "type":"string", "enum":[ @@ -516,14 +526,14 @@ "members":{ "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

The start of the timestamp range for the requested media.

If the HLSTimestampRange value is specified, the StartTimestamp value is required.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + "documentation":"

The start of the timestamp range for the requested media.

If the HLSTimestampRange value is specified, the StartTimestamp value is required.

Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + "documentation":"

The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" } }, - "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

The values in the HLSTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

" }, "InvalidArgumentException":{ "type":"structure", @@ -564,7 +574,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the stream from which to retrieve a fragment list. Specify either this parameter or the StreamName parameter.

" }, "MaxResults":{ - "shape":"PageLimit", + "shape":"ListFragmentsMaxResults", "documentation":"

The total number of fragments to return. If the total number of fragments available is more than the value specified in max-results, then a ListFragmentsOutput$NextToken is provided in the output that you can use to resume pagination.
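For context, a minimal pagination sketch against the synchronous kinesisvideoarchivedmedia client. The stream name and endpoint are placeholders; ListFragments is called against the data endpoint returned by GetDataEndpoint for LIST_FRAGMENTS.

```java
import java.net.URI;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.KinesisVideoArchivedMediaClient;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.ListFragmentsRequest;
import software.amazon.awssdk.services.kinesisvideoarchivedmedia.model.ListFragmentsResponse;

public class ListFragmentsSketch {
    public static void main(String[] args) {
        // Placeholder: obtain this from GetDataEndpoint with APIName LIST_FRAGMENTS.
        String dataEndpoint = "https://example-data-endpoint.kinesisvideo.us-west-2.amazonaws.com";

        KinesisVideoArchivedMediaClient media = KinesisVideoArchivedMediaClient.builder()
                .region(Region.US_WEST_2)
                .endpointOverride(URI.create(dataEndpoint))
                .build();

        String nextToken = null;
        do {
            ListFragmentsResponse page = media.listFragments(ListFragmentsRequest.builder()
                    .streamName("my-stream")
                    .maxResults(1000L)        // ListFragmentsMaxResults: 1 to 1,000 per page
                    .nextToken(nextToken)
                    .build());
            page.fragments().forEach(f -> System.out.println(f.fragmentNumber()));
            nextToken = page.nextToken();     // null once there are no more pages
        } while (nextToken != null);
    }
}
```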

" }, "NextToken":{ @@ -577,6 +587,11 @@ } } }, + "ListFragmentsMaxResults":{ + "type":"long", + "max":1000, + "min":1 + }, "ListFragmentsOutput":{ "type":"structure", "members":{ @@ -624,11 +639,6 @@ "error":{"httpStatusCode":401}, "exception":true }, - "PageLimit":{ - "type":"long", - "max":1000, - "min":1 - }, "Payload":{ "type":"blob", "streaming":true diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index e2c0911ebd23..d126e5117919 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index d9ee8f249e94..4b87379fca17 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kms/pom.xml b/services/kms/pom.xml index c82909306731..10e6832bc70a 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index dd7e44bb5164..b3a7966ff703 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index 794306c57173..19f2baf50e9c 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index 893a06423102..bb9557206b96 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -646,7 +646,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.

Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version. To get more information about a function or version, use GetFunction.

" + "documentation":"

Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.

Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version.

The ListFunctions action returns a subset of the FunctionConfiguration fields. To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason, LastUpdateStatusReasonCode) for a function or version, use GetFunction.
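For example, with the SDK's synchronous Lambda client, listing every published version and then fetching the state fields for each function might look like the following sketch (region and credentials are taken from the environment; function selection is illustrative only):

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.FunctionConfiguration;
import software.amazon.awssdk.services.lambda.model.FunctionVersion;
import software.amazon.awssdk.services.lambda.model.GetFunctionRequest;
import software.amazon.awssdk.services.lambda.model.GetFunctionResponse;
import software.amazon.awssdk.services.lambda.model.ListFunctionsRequest;
import software.amazon.awssdk.services.lambda.model.ListFunctionsResponse;

public class ListFunctionsSketch {
    public static void main(String[] args) {
        try (LambdaClient lambda = LambdaClient.create()) {
            ListFunctionsRequest request = ListFunctionsRequest.builder()
                    .functionVersion(FunctionVersion.ALL)   // include published versions, not just $LATEST
                    .build();

            // The paginator follows NextMarker automatically; each page holds at most 50 functions.
            for (ListFunctionsResponse page : lambda.listFunctionsPaginator(request)) {
                for (FunctionConfiguration summary : page.functions()) {
                    // ListFunctions omits the state/update-status fields; GetFunction returns them.
                    GetFunctionResponse full = lambda.getFunction(GetFunctionRequest.builder()
                            .functionName(summary.functionArn())
                            .build());
                    System.out.println(summary.functionArn() + " state=" + full.configuration().state());
                }
            }
        }
    }
}
```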

" }, "ListFunctionsByCodeSigningConfig":{ "name":"ListFunctionsByCodeSigningConfig", @@ -3105,6 +3105,7 @@ }, "LayerPermissionAllowedAction":{ "type":"string", + "max":22, "pattern":"lambda:GetLayerVersion" }, "LayerPermissionAllowedPrincipal":{ @@ -3442,7 +3443,7 @@ }, "MaxItems":{ "shape":"MaxListItems", - "documentation":"

The maximum number of functions to return.

", + "documentation":"

The maximum number of functions to return in the response. Note that ListFunctions returns a maximum of 50 items in each response, even if you set the number higher.

", "location":"querystring", "locationName":"MaxItems" } @@ -3745,6 +3746,7 @@ }, "OrganizationId":{ "type":"string", + "max":34, "pattern":"o-[a-z0-9]{10,32}" }, "PackageType":{ @@ -3791,7 +3793,7 @@ }, "Principal":{ "type":"string", - "pattern":".*" + "pattern":"[^\\s]+" }, "ProvisionedConcurrencyConfigList":{ "type":"list", @@ -4353,6 +4355,7 @@ }, "SourceOwner":{ "type":"string", + "max":12, "pattern":"\\d{12}" }, "State":{ diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index 4b86840fe932..81bf127ef7c7 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index 79292f0f7991..dbaf0d613c36 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index dda0cabdf7e4..bf5755603d57 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index 1dadadf311f2..b5f055a42627 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index b74d95444ef1..26cbcf1d2134 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index 26a2f9b8a189..abcc2b74dc9e 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/location/pom.xml b/services/location/pom.xml index 4ec479576f32..8ae0491879fd 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 location AWS Java SDK :: Services :: Location diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index fbd8e4187969..d38b7d533bac 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index efdb541bb42e..c83d583bb89f 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index 7b5007ffce54..8caa49c44baf 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 macie AWS Java SDK :: Services :: Macie diff --git 
a/services/macie2/pom.xml b/services/macie2/pom.xml index eca3a8a78e50..07f6a2ba1a0e 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index 1ba8f9c09a0f..4ca621001be7 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index c44039818d46..cc561c42f6a7 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index 1e1a0d0a9dca..bca16b7b3254 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 1ebc8a3fdac1..4d8cf2f141f6 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index dcfde92546ab..0fb98da69b1d 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index 6b0acfece261..7facb6e18aa4 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index 850ce6727d18..a36b6470a18c 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 mediaconvert diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index a145ed6cba35..c94ed93dfc7b 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 medialive diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 3f94607e6a2c..43fd39430d39 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 mediapackage diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index b31c852683b9..0841a20cd1f3 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 mediapackagevod AWS Java SDK :: Services :: 
MediaPackage Vod diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index 933aaf9aaf04..d017248d7160 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index a42893bb8747..0ad913d81cc1 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index fa88d57c8972..f815512c694c 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index 148f9a2be19b..a08a722b090b 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 migrationhub diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index 1ab4f7a80bd6..8488918cdf02 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index 95e367a8ce32..ae91539a7efc 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 mobile diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 8815d016ba05..af10f303fc7a 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 mq diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index 0b0d20340e49..3dac65fd0c57 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index 97a4d00231c9..1f85ef9adb57 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 5cb370eac2d7..4fc022c53b59 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 neptune AWS Java SDK :: Services :: Neptune diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index 74255d730a56..01fd5427d131 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index 9d590f0f752a..e022fbf790ef 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 
11d1f553ad19..a358cc52a329 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index 5f45d8fe2cf1..efa6cb6cb923 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 45941cddc44a..a1057d0d5fb4 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index d0d402f46968..969569600978 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 outposts AWS Java SDK :: Services :: Outposts diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 8287a75c49ba..e73614aa17fb 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index 5d8147558546..471a272f8f2f 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 9d16c72ee827..a7971d532429 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index 59166c7aae46..c42db7acf586 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index f1731c490333..c1e103b0dee7 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 75b241f601f6..4b55ee9ca184 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index 6e2f493ac649..5e8dd27124f0 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/polly/pom.xml b/services/polly/pom.xml index 54fcfba45b22..847039e957c7 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.16.13 + 2.16.14 polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/pom.xml b/services/pom.xml index 22e236ea04cc..376b8edb7b05 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 services AWS Java SDK :: Services diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index 377a32409bc4..1e9629500a77 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 pricing diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index 2089fbec4a53..0ce071e32f09 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index ee1352397928..312645debf92 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index d18664e45f70..3e62f1d67f08 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/ram/pom.xml b/services/ram/pom.xml index a45d5f1532c4..3cdf35d14882 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ram AWS Java SDK :: Services :: RAM diff --git a/services/rds/pom.xml b/services/rds/pom.xml index 4c909ac80325..13a5e35b367e 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index 84bc416aa1e1..200900067ffe 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index 23d49a9064b5..c6cb89237c02 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index cc3bc7f5d32f..cf96212a6308 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index 69751eed3892..91f015561d6f 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index 05bdca5b3e6b..4b8059ec9ae2 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index 
1bf69b068c16..4c0566c4e643 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index 2f739c86298f..7b8f44f6a702 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/route53/pom.xml b/services/route53/pom.xml index 8c87448f36f5..161493147302 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index 5f8ecd423deb..97ff05e24a9f 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index 02c20d876b26..484671021aa3 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/s3/pom.xml b/services/s3/pom.xml index de157eed6609..686ec14a0993 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 s3 AWS Java SDK :: Services :: Amazon S3 diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index b6a8fc351310..5915716c70e7 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"NoSuchUpload"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", - "documentation":"

This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure that the parts list is empty.

For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

The following operations are related to AbortMultipartUpload:

" + "documentation":"

This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts action and ensure that the parts list is empty.
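A compact sketch of that clean-up pattern with the SDK's synchronous S3 client; the bucket, key, and upload ID are placeholders.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.ListPartsRequest;
import software.amazon.awssdk.services.s3.model.ListPartsResponse;
import software.amazon.awssdk.services.s3.model.NoSuchUploadException;

public class AbortMultipartUploadSketch {
    public static void main(String[] args) {
        String bucket = "my-bucket";             // placeholder
        String key = "my-object";                // placeholder
        String uploadId = "EXAMPLE-UPLOAD-ID";   // placeholder

        try (S3Client s3 = S3Client.create()) {
            s3.abortMultipartUpload(AbortMultipartUploadRequest.builder()
                    .bucket(bucket).key(key).uploadId(uploadId).build());

            // Verify that no parts remain; in-flight part uploads may still land after the abort.
            try {
                ListPartsResponse parts = s3.listParts(ListPartsRequest.builder()
                        .bucket(bucket).key(key).uploadId(uploadId).build());
                System.out.println("Parts still stored: " + parts.parts().size());
            } catch (NoSuchUploadException e) {
                // The upload ID no longer exists, so no part storage is being billed.
                System.out.println("Upload fully aborted.");
            }
        }
    }
}
```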

For information about permissions required to use the multipart upload, see Multipart Upload and Permissions.

The following operations are related to AbortMultipartUpload:

" }, "CompleteMultipartUpload":{ "name":"CompleteMultipartUpload", @@ -37,7 +37,7 @@ "input":{"shape":"CompleteMultipartUploadRequest"}, "output":{"shape":"CompleteMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html", - "documentation":"

Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.

Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.

Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload.

For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

CompleteMultipartUpload has the following special errors:

The following operations are related to CompleteMultipartUpload:

" + "documentation":"

Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.
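For instance, assembling the parts list and completing the upload with the synchronous S3 client might look like this sketch; the ETags and upload ID are placeholders captured from earlier UploadPart and CreateMultipartUpload responses.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
import software.amazon.awssdk.services.s3.model.CompletedPart;

public class CompleteMultipartUploadSketch {
    public static void main(String[] args) {
        String uploadId = "EXAMPLE-UPLOAD-ID";   // placeholder from CreateMultipartUpload

        // Each entry pairs a part number with the ETag returned by UploadPart for that part.
        CompletedPart part1 = CompletedPart.builder().partNumber(1).eTag("\"etag-1\"").build();
        CompletedPart part2 = CompletedPart.builder().partNumber(2).eTag("\"etag-2\"").build();

        try (S3Client s3 = S3Client.create()) {
            s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                    .bucket("my-bucket")
                    .key("my-object")
                    .uploadId(uploadId)
                    .multipartUpload(CompletedMultipartUpload.builder()
                            .parts(part1, part2)   // must be listed in ascending part-number order
                            .build())
                    .build());
        }
    }
}
```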

Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.

Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload.

For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions.

CompleteMultipartUpload has the following special errors:

The following operations are related to CompleteMultipartUpload:

" }, "CopyObject":{ "name":"CopyObject", @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-copy-source-if Headers

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Server-side encryption

When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.

If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

You can use the CopyObject operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.

The following operations are related to CopyObject:

For more information, see Copying Objects.

", + "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. Using this API, you can create a copy of an object that is up to 5 GB in size in a single atomic action. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-copy-source-if Headers

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Server-side encryption

When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.

If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
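As a sketch, changing an object's storage class in place with the 2.16.x request shape (copySource takes a URL-encoded "source-bucket/source-key" string; the bucket and key names below are placeholders):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
import software.amazon.awssdk.services.s3.model.MetadataDirective;
import software.amazon.awssdk.services.s3.model.StorageClass;

public class CopyObjectStorageClassSketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            s3.copyObject(CopyObjectRequest.builder()
                    .copySource("my-bucket/my-object")         // source, as "bucket/key" (URL-encoded)
                    .bucket("my-bucket")                       // destination bucket
                    .key("my-object")                          // destination key (same key = copy in place)
                    .storageClass(StorageClass.STANDARD_IA)    // new storage class for the copied object
                    .metadataDirective(MetadataDirective.COPY) // keep the source object's metadata
                    .build());
        }
    }
}
```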

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.

The following operations are related to CopyObject:

For more information, see Copying Objects.

", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -79,7 +79,7 @@ "input":{"shape":"CreateMultipartUploadRequest"}, "output":{"shape":"CreateMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation":"

This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

For more information about multipart uploads, see Multipart Upload Overview.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

For more information, see Protecting Data Using Server-Side Encryption.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side- Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (SĂŁo Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:

    x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"

The following operations are related to CreateMultipartUpload:

", + "documentation":"

This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
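A sketch of that flow with the synchronous S3 client: create the upload, reuse its ID for each part, and keep the ETags for the later CompleteMultipartUpload call. The bucket, key, and payload are placeholders; real uploads need parts of at least 5 MB except the last.

```java
import java.nio.charset.StandardCharsets;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.UploadPartRequest;

public class CreateMultipartUploadSketch {
    public static void main(String[] args) {
        byte[] partData = "example part payload".getBytes(StandardCharsets.UTF_8); // placeholder data

        try (S3Client s3 = S3Client.create()) {
            // The upload ID ties every subsequent UploadPart / Complete / Abort call together.
            String uploadId = s3.createMultipartUpload(CreateMultipartUploadRequest.builder()
                            .bucket("my-bucket")
                            .key("my-object")
                            .build())
                    .uploadId();

            String eTag = s3.uploadPart(UploadPartRequest.builder()
                            .bucket("my-bucket")
                            .key("my-object")
                            .uploadId(uploadId)
                            .partNumber(1)                  // parts are numbered starting at 1
                            .build(),
                    RequestBody.fromBytes(partData))
                    .eTag();

            System.out.println("uploadId=" + uploadId + " part1ETag=" + eTag);
            // ...upload the remaining parts, then call CompleteMultipartUpload (or AbortMultipartUpload).
        }
    }
}
```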

For more information about multipart uploads, see Multipart Upload Overview.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For information about the permissions required to use the multipart upload API, see Multipart Upload and Permissions.

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

For more information, see Protecting Data Using Server-Side Encryption.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side-Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (SĂŁo Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:

    x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
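
As a hedged illustration, the same two choices map to builder methods on CreateMultipartUploadRequest in the AWS SDK for Java 2.x; the bucket, key, and grantee IDs below are placeholders taken from the header example above:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
    import software.amazon.awssdk.services.s3.model.ObjectCannedACL;

    public class CreateMultipartUploadAclExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Option 1: a canned ACL (x-amz-acl).
                CreateMultipartUploadRequest canned = CreateMultipartUploadRequest.builder()
                        .bucket("my-bucket").key("report.csv")          // placeholders
                        .acl(ObjectCannedACL.BUCKET_OWNER_FULL_CONTROL)
                        .build();

                // Option 2: explicit grants (x-amz-grant-read), using the grantee syntax shown above.
                CreateMultipartUploadRequest explicit = CreateMultipartUploadRequest.builder()
                        .bucket("my-bucket").key("report.csv")
                        .grantRead("id=\"11112222333\", id=\"444455556666\"")
                        .build();

                // Use one approach or the other on a single request, never both.
                System.out.println(s3.createMultipartUpload(canned).uploadId());
            }
        }
    }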

The following operations are related to CreateMultipartUpload:

", "alias":"InitiateMultipartUpload" }, "DeleteBucket":{ @@ -112,7 +112,7 @@ }, "input":{"shape":"DeleteBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html", - "documentation":"

Deletes the cors configuration information set for the bucket.

To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources:

" + "documentation":"

Deletes the cors configuration information set for the bucket.

To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.

Related Resources:

" }, "DeleteBucketEncryption":{ "name":"DeleteBucketEncryption", @@ -122,7 +122,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketEncryptionRequest"}, - "documentation":"

This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the DELETE action removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service User Guide.

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.

Related Resources

" }, "DeleteBucketIntelligentTieringConfiguration":{ "name":"DeleteBucketIntelligentTieringConfiguration", @@ -184,7 +184,7 @@ }, "input":{"shape":"DeleteBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html", - "documentation":"

This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account to use this operation.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and UserPolicies.

The following operations are related to DeleteBucketPolicy

" + "documentation":"

This implementation of the DELETE action uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account to use this operation.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.
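
A minimal sketch of this call with the AWS SDK for Java 2.x, showing how the 403 and 405 cases described above surface as an S3Exception (the bucket name is a placeholder):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyRequest;
    import software.amazon.awssdk.services.s3.model.S3Exception;

    public class DeleteBucketPolicyExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                s3.deleteBucketPolicy(DeleteBucketPolicyRequest.builder()
                        .bucket("my-bucket")                            // placeholder bucket
                        .build());
            } catch (S3Exception e) {
                // 403 Access Denied: missing DeleteBucketPolicy permission.
                // 405 Method Not Allowed: correct permissions, but the identity is not in the bucket owner's account.
                System.err.println(e.statusCode() + " " + e.awsErrorDetails().errorCode());
            }
        }
    }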

The following operations are related to DeleteBucketPolicy

" }, "DeleteBucketReplication":{ "name":"DeleteBucketReplication", @@ -194,7 +194,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketReplicationRequest"}, - "documentation":"

Deletes the replication configuration from the bucket.

To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

It can take a while for the deletion of a replication configuration to fully propagate.

For information about replication configuration, see Replication in the Amazon S3 Developer Guide.

The following operations are related to DeleteBucketReplication:

" + "documentation":"

Deletes the replication configuration from the bucket.

To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

It can take a while for the deletion of a replication configuration to fully propagate.

For information about replication configuration, see Replication in the Amazon S3 Developer Guide.

The following operations are related to DeleteBucketReplication:

" }, "DeleteBucketTagging":{ "name":"DeleteBucketTagging", @@ -216,7 +216,7 @@ }, "input":{"shape":"DeleteBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html", - "documentation":"

This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

This DELETE operation requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

For more information about hosting websites, see Hosting Websites on Amazon S3.

The following operations are related to DeleteBucketWebsite:

" + "documentation":"

This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

This DELETE action requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

For more information about hosting websites, see Hosting Websites on Amazon S3.

The following operations are related to DeleteBucketWebsite:

" }, "DeleteObject":{ "name":"DeleteObject", @@ -228,7 +228,7 @@ "input":{"shape":"DeleteObjectRequest"}, "output":{"shape":"DeleteObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html", - "documentation":"

Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.

If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.

For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.

You can delete objects by explicitly calling the DELETE Object API or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.

The following operation is related to DeleteObject:

" + "documentation":"

Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.

If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.

For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.

You can delete objects by explicitly calling DELETE Object or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.
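
As a rough sketch with the AWS SDK for Java 2.x, deleting a specific version might look like the following; the bucket, key, and version ID are placeholders:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
    import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;

    public class DeleteObjectVersionExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                DeleteObjectResponse response = s3.deleteObject(DeleteObjectRequest.builder()
                        .bucket("versioned-bucket")                     // placeholder bucket
                        .key("photos/2006/February/sample.jpg")         // placeholder key
                        .versionId("EXAMPLE-VERSION-ID")                // placeholder; omit to insert a delete marker instead
                        .build());
                // Mirrors the x-amz-delete-marker response header described above.
                System.out.println("x-amz-delete-marker: " + response.deleteMarker());
            }
        }
    }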

The following action is related to DeleteObject:

" }, "DeleteObjectTagging":{ "name":"DeleteObjectTagging", @@ -250,7 +250,7 @@ "input":{"shape":"DeleteObjectsRequest"}, "output":{"shape":"DeleteObjectsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", - "documentation":"

This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body.

When performing this operation on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.

Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

The following operations are related to DeleteObjects:

", + "documentation":"

This action enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this action provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete action and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

The action supports two modes for the response: verbose and quiet. By default, the action uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete action encountered an error. For a successful deletion, the action does not return any information about the delete in the response body.

When performing this action on an MFA Delete enabled bucket that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.

Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
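
A minimal quiet-mode sketch with the AWS SDK for Java 2.x; the bucket and keys are placeholders, and the SDK computes the required Content-MD5 checksum for this request, so it is not set by hand here:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.Delete;
    import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
    import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
    import software.amazon.awssdk.services.s3.model.ObjectIdentifier;

    public class DeleteObjectsExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                DeleteObjectsResponse response = s3.deleteObjects(DeleteObjectsRequest.builder()
                        .bucket("my-bucket")                            // placeholder bucket
                        .delete(Delete.builder()
                                .objects(ObjectIdentifier.builder().key("logs/2021/01/a.gz").build(),
                                         ObjectIdentifier.builder().key("logs/2021/01/b.gz").build())
                                .quiet(true)                            // quiet mode: only errors come back
                                .build())
                        .build());
                response.errors().forEach(err ->
                        System.err.println(err.key() + ": " + err.code() + " - " + err.message()));
            }
        }
    }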

The following operations are related to DeleteObjects:

", "alias":"DeleteMultipleObjects", "httpChecksumRequired":true }, @@ -272,7 +272,7 @@ }, "input":{"shape":"GetBucketAccelerateConfigurationRequest"}, "output":{"shape":"GetBucketAccelerateConfigurationOutput"}, - "documentation":"

This implementation of the GET operation uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the GET action uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.

You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service User Guide.
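
A minimal sketch of reading the accelerate state with the AWS SDK for Java 2.x (the bucket name is a placeholder):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationRequest;
    import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationResponse;

    public class GetBucketAccelerateExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                GetBucketAccelerateConfigurationResponse response = s3.getBucketAccelerateConfiguration(
                        GetBucketAccelerateConfigurationRequest.builder()
                                .bucket("my-bucket")                    // placeholder bucket
                                .build());
                // status() is null when Transfer Acceleration has never been configured on the bucket.
                System.out.println("Transfer Acceleration state: " + response.statusAsString());
            }
        }
    }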

Related Resources

" }, "GetBucketAcl":{ "name":"GetBucketAcl", @@ -283,7 +283,7 @@ "input":{"shape":"GetBucketAclRequest"}, "output":{"shape":"GetBucketAclOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html", - "documentation":"

This implementation of the GET operation uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

Related Resources

" + "documentation":"

This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

Related Resources

" }, "GetBucketAnalyticsConfiguration":{ "name":"GetBucketAnalyticsConfiguration", @@ -293,7 +293,7 @@ }, "input":{"shape":"GetBucketAnalyticsConfigurationRequest"}, "output":{"shape":"GetBucketAnalyticsConfigurationOutput"}, - "documentation":"

This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the GET action returns an analytics configuration (identified by the analytics configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service User Guide.

Related Resources

" }, "GetBucketCors":{ "name":"GetBucketCors", @@ -356,7 +356,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationOutput"}, - "documentation":"

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier API description, see GetBucketLifecycle.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycleConfiguration has the following special error:

The following operations are related to GetBucketLifecycleConfiguration:

" + "documentation":"

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier action, see GetBucketLifecycle.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
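
A minimal sketch of reading the configuration with the AWS SDK for Java 2.x (the bucket name is a placeholder):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationRequest;
    import software.amazon.awssdk.services.s3.model.LifecycleRule;

    public class GetBucketLifecycleConfigurationExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                for (LifecycleRule rule : s3.getBucketLifecycleConfiguration(
                        GetBucketLifecycleConfigurationRequest.builder()
                                .bucket("my-bucket")                    // placeholder bucket
                                .build()).rules()) {
                    // Each rule carries the filter element (prefix and/or tags) described above.
                    System.out.println(rule.id() + " -> " + rule.statusAsString());
                }
            }
        }
    }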

GetBucketLifecycleConfiguration has the following special error:

The following operations are related to GetBucketLifecycleConfiguration:

" }, "GetBucketLocation":{ "name":"GetBucketLocation", @@ -410,7 +410,7 @@ }, "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfiguration"}, - "documentation":"

Returns the notification configuration of a bucket.

If notifications are not enabled on the bucket, the operation returns an empty NotificationConfiguration element.

By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification permission.

For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.

The following operation is related to GetBucketNotification:

" + "documentation":"

Returns the notification configuration of a bucket.

If notifications are not enabled on the bucket, the action returns an empty NotificationConfiguration element.

By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification permission.

For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.
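
A minimal sketch with the AWS SDK for Java 2.x (the bucket name is a placeholder):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationRequest;
    import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationResponse;

    public class GetBucketNotificationExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                GetBucketNotificationConfigurationResponse response = s3.getBucketNotificationConfiguration(
                        GetBucketNotificationConfigurationRequest.builder()
                                .bucket("my-bucket")                    // placeholder bucket
                                .build());
                // All three lists are empty when notifications are not enabled on the bucket.
                System.out.println("SNS topics: " + response.topicConfigurations().size());
                System.out.println("SQS queues: " + response.queueConfigurations().size());
                System.out.println("Lambda functions: " + response.lambdaFunctionConfigurations().size());
            }
        }
    }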

The following action is related to GetBucketNotification:

" }, "GetBucketOwnershipControls":{ "name":"GetBucketOwnershipControls", @@ -431,7 +431,7 @@ "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html", - "documentation":"

Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

The following operation is related to GetBucketPolicy:

" + "documentation":"

Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.
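
A minimal sketch with the AWS SDK for Java 2.x, with the error cases above surfacing as an S3Exception (the bucket name is a placeholder):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest;
    import software.amazon.awssdk.services.s3.model.S3Exception;

    public class GetBucketPolicyExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                String policyJson = s3.getBucketPolicy(GetBucketPolicyRequest.builder()
                        .bucket("my-bucket")                            // placeholder bucket
                        .build()).policy();
                System.out.println(policyJson);
            } catch (S3Exception e) {
                // 403 Access Denied or 405 Method Not Allowed, as described above.
                System.err.println(e.statusCode() + " " + e.awsErrorDetails().errorCode());
            }
        }
    }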

The following action is related to GetBucketPolicy:

" }, "GetBucketPolicyStatus":{ "name":"GetBucketPolicyStatus", @@ -451,7 +451,7 @@ }, "input":{"shape":"GetBucketReplicationRequest"}, "output":{"shape":"GetBucketReplicationOutput"}, - "documentation":"

Returns the replication configuration of a bucket.

It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.

For information about replication configuration, see Replication in the Amazon Simple Storage Service Developer Guide.

This operation requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.

If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.

For information about GetBucketReplication errors, see List of replication-related error codes

The following operations are related to GetBucketReplication:

" + "documentation":"

Returns the replication configuration of a bucket.

It can take a while for a put or delete of a replication configuration to propagate to all Amazon S3 systems. Therefore, a get request soon after a put or delete can return a wrong result.

For information about replication configuration, see Replication in the Amazon Simple Storage Service User Guide.

This action requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.

If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.

For information about GetBucketReplication errors, see List of replication-related error codes

The following operations are related to GetBucketReplication:

" }, "GetBucketRequestPayment":{ "name":"GetBucketRequestPayment", @@ -495,7 +495,7 @@ "input":{"shape":"GetBucketWebsiteRequest"}, "output":{"shape":"GetBucketWebsiteOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html", - "documentation":"

Returns the website configuration for a bucket. To host website on Amazon S3, you can configure a bucket as website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

This GET operation requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.

The following operations are related to DeleteBucketWebsite:

" + "documentation":"

Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

This GET action requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.

The following operations are related to GetBucketWebsite:

" }, "GetObject":{ "name":"GetObject", @@ -510,7 +510,7 @@ {"shape":"InvalidObjectState"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", - "documentation":"

Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

Versioning

By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.

If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.

Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

Additional Considerations about Request Headers

If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested.

If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.

The following operations are related to GetObject:

" + "documentation":"

Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

Versioning

By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.

If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.

Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

Additional Considerations about Request Headers

If both the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK and the requested data.

If both the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, then S3 returns a 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.
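
A minimal GET sketch with the AWS SDK for Java 2.x, combining the logical-hierarchy key from the example above with two of the response-header override parameters (bucket and key are placeholders):

    import software.amazon.awssdk.core.ResponseBytes;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectRequest;
    import software.amazon.awssdk.services.s3.model.GetObjectResponse;

    public class GetObjectExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                GetObjectRequest request = GetObjectRequest.builder()
                        .bucket("examplebucket")                        // placeholder bucket
                        .key("photos/2006/February/sample.jpg")         // key from the logical-hierarchy example above
                        // Optional response-header overrides; the request is signed, as required.
                        .responseContentType("image/jpeg")
                        .responseContentDisposition("attachment; filename=\"sample.jpg\"")
                        .build();
                ResponseBytes<GetObjectResponse> object = s3.getObjectAsBytes(request);
                System.out.println("Read " + object.asByteArray().length + " bytes, content type "
                        + object.response().contentType());
            }
        }
    }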

The following operations are related to GetObject:

" }, "GetObjectAcl":{ "name":"GetObjectAcl", @@ -564,7 +564,7 @@ }, "input":{"shape":"GetObjectTaggingRequest"}, "output":{"shape":"GetObjectTaggingOutput"}, - "documentation":"

Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.

To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET operation returns information about current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.

By default, the bucket owner has this permission and can grant this permission to others.

For information about the Amazon S3 object tagging feature, see Object Tagging.

The following operation is related to GetObjectTagging:

" + "documentation":"

Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.

To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET action returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.

By default, the bucket owner has this permission and can grant this permission to others.

For information about the Amazon S3 object tagging feature, see Object Tagging.
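
A minimal sketch with the AWS SDK for Java 2.x (bucket, key, and the commented-out version ID are placeholders):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectTaggingRequest;
    import software.amazon.awssdk.services.s3.model.Tag;

    public class GetObjectTaggingExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                GetObjectTaggingRequest request = GetObjectTaggingRequest.builder()
                        .bucket("my-bucket")                            // placeholder bucket
                        .key("report.csv")                              // placeholder key
                        // .versionId("EXAMPLE-VERSION-ID")             // tags of a specific version (needs s3:GetObjectVersionTagging)
                        .build();
                for (Tag tag : s3.getObjectTagging(request).tagSet()) {
                    System.out.println(tag.key() + "=" + tag.value());
                }
            }
        }
    }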

The following action is related to GetObjectTagging:

" }, "GetObjectTorrent":{ "name":"GetObjectTorrent", @@ -575,7 +575,7 @@ "input":{"shape":"GetObjectTorrentRequest"}, "output":{"shape":"GetObjectTorrentOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html", - "documentation":"

Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.

You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.

To use GET, you must have READ access to the object.

This action is not supported by Amazon S3 on Outposts.

The following operation is related to GetObjectTorrent:

" + "documentation":"

Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.

You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.

To use GET, you must have READ access to the object.

This action is not supported by Amazon S3 on Outposts.

The following action is related to GetObjectTorrent:

" }, "GetPublicAccessBlock":{ "name":"GetPublicAccessBlock", @@ -598,7 +598,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html", - "documentation":"

This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK if the bucket exists and you have permission to access it.

If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A message body is not included, so you cannot determine the exception beyond these error codes.

To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

" + "documentation":"

This action is useful to determine if a bucket exists and you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it.

If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A message body is not included, so you cannot determine the exception beyond these error codes.

To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
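
A minimal existence-and-access check with the AWS SDK for Java 2.x (the bucket name is a placeholder):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
    import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
    import software.amazon.awssdk.services.s3.model.S3Exception;

    public class HeadBucketExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                s3.headBucket(HeadBucketRequest.builder().bucket("my-bucket").build()); // placeholder bucket
                System.out.println("Bucket exists and is accessible (200 OK).");
            } catch (NoSuchBucketException e) {
                System.out.println("404 Not Found: the bucket does not exist.");
            } catch (S3Exception e) {
                // Typically 403 Forbidden: the bucket exists but you cannot access it; no detail in the body.
                System.out.println("HEAD returned " + e.statusCode());
            }
        }
    }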

" }, "HeadObject":{ "name":"HeadObject", @@ -612,7 +612,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", - "documentation":"

The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve the exact exception beyond these error codes.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

  • Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

  • The last modified property in this case is the creation date of the object.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

For more information about conditional requests, see RFC 7232.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

The following operation is related to HeadObject:

" + "documentation":"

The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve the exact exception beyond these error codes.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

  • Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

  • The last modified property in this case is the creation date of the object.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

For more information about conditional requests, see RFC 7232.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
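
A minimal metadata read with the AWS SDK for Java 2.x (bucket and key are placeholders):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
    import software.amazon.awssdk.services.s3.model.HeadObjectResponse;

    public class HeadObjectExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                HeadObjectResponse head = s3.headObject(HeadObjectRequest.builder()
                        .bucket("my-bucket")                            // placeholder bucket
                        .key("report.csv")                              // placeholder key
                        .build());
                System.out.println(head.contentLength() + " bytes, last modified " + head.lastModified());
                // User-defined metadata arrives without the x-amz-meta- prefix.
                head.metadata().forEach((name, value) -> System.out.println(name + ": " + value));
            }
        }
    }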

The following action is related to HeadObject:

" }, "ListBucketAnalyticsConfigurations":{ "name":"ListBucketAnalyticsConfigurations", @@ -622,7 +622,7 @@ }, "input":{"shape":"ListBucketAnalyticsConfigurationsRequest"}, "output":{"shape":"ListBucketAnalyticsConfigurationsOutput"}, - "documentation":"

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to ListBucketAnalyticsConfigurations:

" + "documentation":"

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This action supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
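
A minimal pagination loop over the continuation token with the AWS SDK for Java 2.x (the bucket name is a placeholder):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsRequest;
    import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsResponse;

    public class ListBucketAnalyticsConfigurationsExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                String token = null;
                do {
                    ListBucketAnalyticsConfigurationsResponse page = s3.listBucketAnalyticsConfigurations(
                            ListBucketAnalyticsConfigurationsRequest.builder()
                                    .bucket("my-bucket")                // placeholder bucket
                                    .continuationToken(token)           // null on the first page
                                    .build());
                    page.analyticsConfigurationList().forEach(config -> System.out.println(config.id()));
                    token = Boolean.TRUE.equals(page.isTruncated()) ? page.nextContinuationToken() : null;
                } while (token != null);
            }
        }
    }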

The following operations are related to ListBucketAnalyticsConfigurations:

" }, "ListBucketIntelligentTieringConfigurations":{ "name":"ListBucketIntelligentTieringConfigurations", @@ -642,7 +642,7 @@ }, "input":{"shape":"ListBucketInventoryConfigurationsRequest"}, "output":{"shape":"ListBucketInventoryConfigurationsOutput"}, - "documentation":"

Returns a list of inventory configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory

The following operations are related to ListBucketInventoryConfigurations:

" + "documentation":"

Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.

This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory

The following operations are related to ListBucketInventoryConfigurations:

" }, "ListBucketMetricsConfigurations":{ "name":"ListBucketMetricsConfigurations", @@ -652,7 +652,7 @@ }, "input":{"shape":"ListBucketMetricsConfigurationsRequest"}, "output":{"shape":"ListBucketMetricsConfigurationsOutput"}, - "documentation":"

Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to ListBucketMetricsConfigurations:

" + "documentation":"

Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.

This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to ListBucketMetricsConfigurations:

" }, "ListBuckets":{ "name":"ListBuckets", @@ -674,7 +674,7 @@ "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", - "documentation":"

This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.

This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads parameter in the response. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.

In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

The following operations are related to ListMultipartUploads:

" + "documentation":"

This action lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.

This action returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.
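
A minimal sketch of this key-marker/upload-id-marker pagination with the AWS SDK for Java v2 `S3Client` (bucket name and client setup are placeholders):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class ListInProgressUploadsExample {
    // Pages through in-progress multipart uploads for a bucket.
    static void listAll(S3Client s3, String bucket) {
        String keyMarker = null;
        String uploadIdMarker = null;
        ListMultipartUploadsResponse page;
        do {
            page = s3.listMultipartUploads(ListMultipartUploadsRequest.builder()
                    .bucket(bucket)
                    .keyMarker(keyMarker)
                    .uploadIdMarker(uploadIdMarker)
                    .build());
            page.uploads().forEach(u -> System.out.println(u.key() + " " + u.uploadId()));
            keyMarker = page.nextKeyMarker();
            uploadIdMarker = page.nextUploadIdMarker();
        } while (Boolean.TRUE.equals(page.isTruncated()));
    }
}
```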

In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.

The following operations are related to ListMultipartUploads:

" }, "ListObjectVersions":{ "name":"ListObjectVersions", @@ -700,7 +700,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html", - "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

The following operations are related to ListObjects:

", + "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

The following operations are related to ListObjects:

", "alias":"GetBucket" }, "ListObjectsV2":{ @@ -714,7 +714,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in an ascending order of the respective key names in the list.

To use this operation, you must have READ access to the bucket.

To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.

To get a list of your buckets, see ListBuckets.

The following operations are related to ListObjectsV2:

" + "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in ascending order by key name.
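
With the AWS SDK for Java v2, the paginator can follow NextContinuationToken automatically; a minimal sketch (bucket name and prefix are placeholders) might look like:

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.S3Object;

class ListObjectsV2Example {
    // Iterates over every object under a prefix; the paginator issues follow-up requests as needed.
    static void listAll(S3Client s3, String bucket) {
        ListObjectsV2Request request = ListObjectsV2Request.builder()
                .bucket(bucket)
                .prefix("photos/")   // optional selection criterion
                .build();
        for (S3Object object : s3.listObjectsV2Paginator(request).contents()) {
            System.out.println(object.key() + " (" + object.size() + " bytes)");
        }
    }
}
```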

To use this operation, you must have READ access to the bucket.

To use this action in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

This section describes the latest revision of this action. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.

To get a list of your buckets, see ListBuckets.

The following operations are related to ListObjectsV2:

" }, "ListParts":{ "name":"ListParts", @@ -725,7 +725,7 @@ "input":{"shape":"ListPartsRequest"}, "output":{"shape":"ListPartsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html", - "documentation":"

Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

The following operations are related to ListParts:

" + "documentation":"

Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.
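
A minimal sketch of this part-number-marker loop with the AWS SDK for Java v2 (bucket, key, and upload ID are placeholders):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class ListPartsExample {
    // Pages through the uploaded parts of one multipart upload.
    static void listAll(S3Client s3, String bucket, String key, String uploadId) {
        Integer marker = null;
        ListPartsResponse page;
        do {
            page = s3.listParts(ListPartsRequest.builder()
                    .bucket(bucket)
                    .key(key)
                    .uploadId(uploadId)
                    .partNumberMarker(marker)
                    .build());
            page.parts().forEach(p -> System.out.println("part " + p.partNumber() + " etag " + p.eTag()));
            marker = page.nextPartNumberMarker();
        } while (Boolean.TRUE.equals(page.isTruncated()));
    }
}
```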

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.

The following operations are related to ListParts:

" }, "PutBucketAccelerateConfiguration":{ "name":"PutBucketAccelerateConfiguration", @@ -734,7 +734,7 @@ "requestUri":"/{Bucket}?accelerate" }, "input":{"shape":"PutBucketAccelerateConfigurationRequest"}, - "documentation":"

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.

To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The Transfer Acceleration state of a bucket can be set to one of the following two values:

The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.

After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.

The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").

For more information about transfer acceleration, see Transfer Acceleration.

The following operations are related to PutBucketAccelerateConfiguration:

" + "documentation":"

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.
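
For illustration, a minimal AWS SDK for Java v2 sketch that enables Transfer Acceleration on a bucket (the bucket name is a placeholder):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class EnableTransferAccelerationExample {
    // Sets the bucket's accelerate configuration to Enabled.
    static void enable(S3Client s3, String bucket) {
        s3.putBucketAccelerateConfiguration(PutBucketAccelerateConfigurationRequest.builder()
                .bucket(bucket)
                .accelerateConfiguration(AccelerateConfiguration.builder()
                        .status(BucketAccelerateStatus.ENABLED)
                        .build())
                .build());
    }
}
```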

To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The Transfer Acceleration state of a bucket can be set to one of the following two values:

The GetBucketAccelerateConfiguration action returns the transfer acceleration state of a bucket.

After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.

The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").

For more information about transfer acceleration, see Transfer Acceleration.

The following operations are related to PutBucketAccelerateConfiguration:

" }, "PutBucketAcl":{ "name":"PutBucketAcl", @@ -764,7 +764,7 @@ }, "input":{"shape":"PutBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html", - "documentation":"

Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "documentation":"

Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
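
A minimal AWS SDK for Java v2 sketch of such a cors configuration (the origin, methods, and bucket name are placeholder choices):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class PutBucketCorsExample {
    // Allows cross-origin GET and PUT requests from http://www.example.com.
    static void configure(S3Client s3, String bucket) {
        CORSRule rule = CORSRule.builder()
                .allowedOrigins("http://www.example.com")
                .allowedMethods("GET", "PUT")
                .allowedHeaders("*")
                .maxAgeSeconds(3000)
                .build();
        s3.putBucketCors(PutBucketCorsRequest.builder()
                .bucket(bucket)
                .corsConfiguration(CORSConfiguration.builder().corsRules(rule).build())
                .build());
    }
}
```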

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.

Related Resources

", "httpChecksumRequired":true }, "PutBucketEncryption":{ @@ -774,7 +774,7 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

This operation uses the encryption subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.

Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service Developer Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "documentation":"

This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.

Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
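
As an illustrative sketch with the AWS SDK for Java v2 (bucket name and KMS key ID are placeholders, and the builder method names are assumed to follow the model shapes described above):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class PutBucketEncryptionExample {
    // Defaults new objects to SSE-KMS and enables the S3 Bucket Key for the bucket.
    static void configure(S3Client s3, String bucket, String kmsKeyId) {
        ServerSideEncryptionRule rule = ServerSideEncryptionRule.builder()
                .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                        .sseAlgorithm(ServerSideEncryption.AWS_KMS)
                        .kmsMasterKeyID(kmsKeyId)
                        .build())
                .bucketKeyEnabled(true)
                .build();
        s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
                .bucket(bucket)
                .serverSideEncryptionConfiguration(
                        ServerSideEncryptionConfiguration.builder().rules(rule).build())
                .build());
    }
}
```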

This action requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.

Related Resources

", "httpChecksumRequired":true }, "PutBucketIntelligentTieringConfiguration":{ @@ -793,7 +793,7 @@ "requestUri":"/{Bucket}?inventory" }, "input":{"shape":"PutBucketInventoryConfigurationRequest"}, - "documentation":"

This implementation of the PUT operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.

You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Special Errors

Related Resources

" + "documentation":"

This implementation of the PUT action adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service User Guide.

You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.

Special Errors

Related Resources

" }, "PutBucketLifecycle":{ "name":"PutBucketLifecycle", @@ -803,7 +803,7 @@ }, "input":{"shape":"PutBucketLifecycleRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", - "documentation":"

For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

Related Resources

", + "documentation":"

For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service User Guide.

By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.

For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

Related Resources

", "deprecated":true, "httpChecksumRequired":true }, @@ -856,7 +856,7 @@ "requestUri":"/{Bucket}?notification" }, "input":{"shape":"PutBucketNotificationConfigurationRequest"}, - "documentation":"

Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

<NotificationConfiguration>

</NotificationConfiguration>

This operation replaces the existing notification configuration with the configuration you include in the request body.

After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

You can disable notifications by adding the empty NotificationConfiguration element.

By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 will not add the configuration to your bucket.

Responses

If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

The following operation is related to PutBucketNotificationConfiguration:

" + "documentation":"

Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

<NotificationConfiguration>

</NotificationConfiguration>

This action replaces the existing notification configuration with the configuration you include in the request body.

After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

You can disable notifications by adding the empty NotificationConfiguration element.
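
For illustration, a minimal AWS SDK for Java v2 sketch that disables notifications by putting an empty NotificationConfiguration (bucket name is a placeholder):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class DisableBucketNotificationsExample {
    // An empty NotificationConfiguration removes all event notifications from the bucket.
    static void disable(S3Client s3, String bucket) {
        s3.putBucketNotificationConfiguration(PutBucketNotificationConfigurationRequest.builder()
                .bucket(bucket)
                .notificationConfiguration(NotificationConfiguration.builder().build())
                .build());
    }
}
```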

By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.

Responses

If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

The following action is related to PutBucketNotificationConfiguration:

" }, "PutBucketOwnershipControls":{ "name":"PutBucketOwnershipControls", @@ -886,7 +886,7 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"PutBucketReplicationRequest"}, - "documentation":"

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information on PutBucketReplication errors, see List of replication-related error codes

The following operations are related to PutBucketReplication:

", + "documentation":"

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the action must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information on PutBucketReplication errors, see List of replication-related error codes

The following operations are related to PutBucketReplication:

", "httpChecksumRequired":true }, "PutBucketRequestPayment":{ @@ -908,7 +908,7 @@ }, "input":{"shape":"PutBucketTaggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", - "documentation":"

Sets the tags for a bucket.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

The following operations are related to PutBucketTagging:

", + "documentation":"

Sets the tags for a bucket.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
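
A minimal AWS SDK for Java v2 sketch that replaces a bucket's tag set (tag keys and values are placeholders):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class PutBucketTaggingExample {
    // Replaces the bucket's entire tag set with the supplied tags.
    static void tag(S3Client s3, String bucket) {
        Tagging tagging = Tagging.builder()
                .tagSet(Tag.builder().key("project").value("billing-reports").build(),
                        Tag.builder().key("team").value("analytics").build())
                .build();
        s3.putBucketTagging(PutBucketTaggingRequest.builder()
                .bucket(bucket)
                .tagging(tagging)
                .build());
    }
}
```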

To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

The following operations are related to PutBucketTagging:

", "httpChecksumRequired":true }, "PutBucketVersioning":{ @@ -930,7 +930,7 @@ }, "input":{"shape":"PutBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html", - "documentation":"

Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide index document name for the bucket.

If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
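
For illustration, a minimal AWS SDK for Java v2 sketch of a basic website configuration (document names and bucket are placeholders):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class PutBucketWebsiteExample {
    // Configures an index document and an error document for static website hosting.
    static void configure(S3Client s3, String bucket) {
        s3.putBucketWebsite(PutBucketWebsiteRequest.builder()
                .bucket(bucket)
                .websiteConfiguration(WebsiteConfiguration.builder()
                        .indexDocument(IndexDocument.builder().suffix("index.html").build())
                        .errorDocument(ErrorDocument.builder().key("error.html").build())
                        .build())
                .build());
    }
}
```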

This PUT action requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.

If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service User Guide.

", "httpChecksumRequired":true }, "PutObject":{ @@ -942,7 +942,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.

Server-side Encryption

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.

If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

Access Control List (ACL)-Specific Request Headers

You can use headers to grant ACL- based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

Related Resources

" + "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.

Server-side Encryption

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.

If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
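
A minimal AWS SDK for Java v2 sketch of such a request (bucket, key, and object body are placeholders):

```java
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class PutObjectExample {
    // Uploads a small object with SSE-KMS and an S3 Bucket Key enabled at the object level.
    static String upload(S3Client s3, String bucket, String key) {
        PutObjectResponse response = s3.putObject(PutObjectRequest.builder()
                        .bucket(bucket)
                        .key(key)
                        .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                        .bucketKeyEnabled(true)
                        .build(),
                RequestBody.fromString("hello, world"));
        return response.eTag();
    }
}
```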

Access Control List (ACL)-Specific Request Headers

You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

Related Resources

" }, "PutObjectAcl":{ "name":"PutObjectAcl", @@ -956,7 +956,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", - "documentation":"

Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon Simple Storage Service Developer Guide.

This action is not supported by Amazon S3 on Outposts.

Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.

Access Permissions

You can set access permissions using one of the following methods:

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

Versioning

The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

Related Resources

", + "documentation":"

Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon Simple Storage Service User Guide.
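
For illustration, a minimal AWS SDK for Java v2 sketch that applies a canned ACL to an object (bucket and key are placeholders):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class PutObjectAclExample {
    // Applies the public-read canned ACL to the current version of an object.
    static void makePublicRead(S3Client s3, String bucket, String key) {
        s3.putObjectAcl(PutObjectAclRequest.builder()
                .bucket(bucket)
                .key(key)
                .acl(ObjectCannedACL.PUBLIC_READ)
                .build());
    }
}
```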

This action is not supported by Amazon S3 on Outposts.

Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.

Access Permissions

You can set access permissions using one of the following methods:

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

Versioning

The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

Related Resources

", "httpChecksumRequired":true }, "PutObjectLegalHold":{ @@ -1000,7 +1000,7 @@ }, "input":{"shape":"PutObjectTaggingRequest"}, "output":{"shape":"PutObjectTaggingOutput"}, - "documentation":"

Sets the supplied tag-set to an object that already exists in a bucket.

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

Related Resources

", + "documentation":"

Sets the supplied tag-set to an object that already exists in a bucket.

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
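
A minimal AWS SDK for Java v2 sketch of such a PUT tagging request (bucket, key, and tag values are placeholders):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class PutObjectTaggingExample {
    // Replaces the tag set on the current version of an object.
    static void tag(S3Client s3, String bucket, String key) {
        s3.putObjectTagging(PutObjectTaggingRequest.builder()
                .bucket(bucket)
                .key(key)
                .tagging(Tagging.builder()
                        .tagSet(Tag.builder().key("classification").value("archive").build())
                        .build())
                .build());
    }
}
```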

For tag restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

Related Resources

", "httpChecksumRequired":true }, "PutPublicAccessBlock":{ @@ -1025,7 +1025,7 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation":"

Restores an archived copy of an object back into Amazon S3

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

The following are additional important facts about the select feature:

Restoring objects

Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

Special Errors

Related Resources

", + "documentation":"

Restores an archived copy of an object back into Amazon S3

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service User Guide.

When making a select request, do the following:

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service User Guide.

When making a select request, you can also do the following:

The following are additional important facts about the select feature:

Restoring objects

Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, or to the S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tier, are not accessible in real time. For objects in the Archive Access or Deep Archive Access tier, you must first initiate a restore request and then wait until the object is moved into the Frequent Access tier. For objects in the S3 Glacier or S3 Glacier Deep Archive storage class, you must first initiate a restore request and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service User Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service User Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service User Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon Simple Storage Service User Guide.

Responses

A successful action returns either the 200 OK or 202 Accepted status code.

Special Errors

Related Resources

", "alias":"PostObjectRestore" }, "SelectObjectContent":{ @@ -1040,7 +1040,7 @@ "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "output":{"shape":"SelectObjectContentOutput"}, - "documentation":"

This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

This action is not supported by Amazon S3 on Outposts.

For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

Permissions

You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

Working with the Response Body

Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response .

GetObject Support

The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.

Special Errors

For a list of special errors for this operation, see List of SELECT Object Content Error Codes

Related Resources

" + "documentation":"

This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

This action is not supported by Amazon S3 on Outposts.

For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service User Guide.

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service User Guide.

Permissions

You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service User Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

Working with the Response Body

Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.

GetObject Support

The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.

Special Errors

For a list of special errors for this action, see the List of SELECT Object Content Error Codes.

Related Resources

" }, "UploadPart":{ "name":"UploadPart", @@ -1051,7 +1051,7 @@ "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", - "documentation":"

Uploads a part in a multipart upload.

In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.

To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.

If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).

Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service Developer Guide .

For information on the permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.

You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.

Special Errors

Related Resources

" + "documentation":"

Uploads a part in a multipart upload.

In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.

To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.

If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).

Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop getting charged for storage of the uploaded parts. Only after you either complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.

For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service User Guide.

For information on the permissions required to use the multipart upload API, go to Multipart Upload and Permissions in the Amazon Simple Storage Service User Guide.

You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service User Guide.

Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.

Special Errors

Related Resources

" }, "UploadPartCopy":{ "name":"UploadPartCopy", @@ -1062,7 +1062,7 @@ "input":{"shape":"UploadPartCopyRequest"}, "output":{"shape":"UploadPartCopyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html", - "documentation":"

Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source in your request and a byte range by adding the request header x-amz-copy-source-range in your request.

The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service Developer Guide.

Instead of using an existing object as part data, you might use the UploadPart operation and provide data in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request. Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.

For more information about using the UploadPartCopy operation, see the following:

Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:

Versioning

If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.

You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:

x-amz-copy-source: /bucket/object?versionId=version id

Special Errors

Related Resources

" + "documentation":"

Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source in your request and a byte range by adding the request header x-amz-copy-source-range in your request.

The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service User Guide.

Instead of using an existing object as part data, you might use the UploadPart action and provide data in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.

For more information about using the UploadPartCopy operation, see the following:

Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:

Versioning

If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.

You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:

x-amz-copy-source: /bucket/object?versionId=version id

Special Errors

Related Resources

" } }, "shapes":{ @@ -1097,7 +1097,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name to which the upload was taking place.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name to which the upload was taking place.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -1580,7 +1580,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket that contains the newly created object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The name of the bucket that contains the newly created object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

" }, "Key":{ "shape":"ObjectKey", @@ -1825,7 +1825,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the destination bucket.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the destination bucket.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -1993,7 +1993,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a COPY operation doesn’t affect bucket-level settings for S3 Bucket Key.

", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.

", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2201,7 +2201,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "locationName":"Bucket" }, "Key":{ @@ -2270,7 +2270,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which to initiate the upload

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket in which to initiate the upload.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -2384,7 +2384,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

", + "documentation":"

Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring this by using any of the officially supported AWS SDKs and the AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2396,7 +2396,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with an object operation doesn’t affect bucket-level settings for S3 Bucket Key.

", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.

", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2823,7 +2823,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name of the bucket containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket containing the object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -2884,7 +2884,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the objects from which to remove the tags.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the objects from which to remove the tags.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -2922,7 +2922,7 @@ }, "Errors":{ "shape":"Errors", - "documentation":"

Container for a failed delete operation that describes the object that Amazon S3 attempted to delete and the error it encountered.

", + "documentation":"

Container for a failed delete action that describes the object that Amazon S3 attempted to delete and the error it encountered.

", "locationName":"Error" } } @@ -2936,7 +2936,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the objects to delete.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the objects to delete.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -3113,7 +3113,7 @@ }, "Code":{ "shape":"Code", - "documentation":"

The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.

Amazon S3 error codes

" + "documentation":"

The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.

Amazon S3 error codes

" }, "Message":{ "shape":"Message", @@ -3175,7 +3175,7 @@ "documentation":"

" } }, - "documentation":"

Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.

" + "documentation":"

Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.

" }, "ExistingObjectReplicationStatus":{ "type":"string", @@ -3885,7 +3885,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name that contains the object for which to get the ACL information.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name that contains the object for which to get the ACL information.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -3933,7 +3933,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object whose Legal Hold status you want to retrieve.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object whose Legal Hold status you want to retrieve.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -3978,7 +3978,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket whose Object Lock configuration you want to retrieve.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket whose Object Lock configuration you want to retrieve.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -4018,7 +4018,7 @@ }, "Restore":{ "shape":"Restore", - "documentation":"

Provides information about object restoration operation and expiration time of the restored object copy.

", + "documentation":"

Provides information about the object restoration action and the expiration time of the restored object copy.

", "location":"header", "locationName":"x-amz-restore" }, @@ -4195,7 +4195,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -4333,7 +4333,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object whose retention settings you want to retrieve.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object whose retention settings you want to retrieve.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -4387,7 +4387,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object for which to get the tagging information.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object for which to get the tagging information.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -4568,7 +4568,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -4773,7 +4773,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket containing the object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -5590,7 +5590,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -5725,7 +5725,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.

", "location":"querystring", "locationName":"max-keys" }, @@ -5800,7 +5800,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket containing the objects.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket containing the objects.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -5823,7 +5823,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, @@ -5860,7 +5860,7 @@ }, "Name":{ "shape":"BucketName", - "documentation":"

The bucket name.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The bucket name.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

" }, "Prefix":{ "shape":"Prefix", @@ -5872,7 +5872,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

" + "documentation":"

Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

" }, "CommonPrefixes":{ "shape":"CommonPrefixList", @@ -5906,7 +5906,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

Bucket name to list.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Bucket name to list.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -5924,7 +5924,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, @@ -6043,7 +6043,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which the parts are being uploaded.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which the parts are being uploaded.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -6414,7 +6414,7 @@ "type":"structure", "members":{ }, - "documentation":"

This operation is not allowed against this storage tier.

", + "documentation":"

This action is not allowed against this storage tier.

", "exception":true }, "ObjectCannedACL":{ @@ -6541,7 +6541,7 @@ "type":"structure", "members":{ }, - "documentation":"

The source object of the COPY operation is not in the active tier and is only stored in Amazon S3 Glacier.

", + "documentation":"

The source object of the COPY action is not in the active tier and is only stored in Amazon S3 Glacier.

", "exception":true }, "ObjectOwnership":{ @@ -6808,7 +6808,7 @@ "locationName":"RestrictPublicBuckets" } }, - "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service User Guide.
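A minimal AWS SDK for Java 2.x sketch that applies the PublicAccessBlock configuration described above to a bucket; the bucket name is a placeholder and all four options are enabled purely for illustration.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PublicAccessBlockConfiguration;
import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockRequest;

public class PublicAccessBlockExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Block all forms of public access for the bucket.
            s3.putPublicAccessBlock(PutPublicAccessBlockRequest.builder()
                    .bucket("my-example-bucket")
                    .publicAccessBlockConfiguration(PublicAccessBlockConfiguration.builder()
                            .blockPublicAcls(true)
                            .ignorePublicAcls(true)
                            .blockPublicPolicy(true)
                            .restrictPublicBuckets(true)
                            .build())
                    .build());
        }
    }
}
```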

" }, "PutBucketAccelerateConfigurationRequest":{ "type":"structure", @@ -6955,7 +6955,7 @@ }, "CORSConfiguration":{ "shape":"CORSConfiguration", - "documentation":"

Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.
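A minimal AWS SDK for Java 2.x sketch of setting the cross-origin access configuration described above; the origin and bucket name are placeholders.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CORSConfiguration;
import software.amazon.awssdk.services.s3.model.CORSRule;
import software.amazon.awssdk.services.s3.model.PutBucketCorsRequest;

public class PutCorsExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Allow GET and PUT requests from one origin; values are placeholders.
            CORSRule rule = CORSRule.builder()
                    .allowedOrigins("https://www.example.com")
                    .allowedMethods("GET", "PUT")
                    .allowedHeaders("*")
                    .maxAgeSeconds(3000)
                    .build();
            s3.putBucketCors(PutBucketCorsRequest.builder()
                    .bucket("my-example-bucket")
                    .corsConfiguration(CORSConfiguration.builder().corsRules(rule).build())
                    .build());
        }
    }
}
```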

", "locationName":"CORSConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, @@ -7541,7 +7541,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name that contains the object to which you want to attach the ACL.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name that contains the object to which you want to attach the ACL.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -7583,7 +7583,7 @@ }, "Key":{ "shape":"ObjectKey", - "documentation":"

Key for which the PUT operation was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Key for which the PUT action was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Key" }, @@ -7626,7 +7626,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object that you want to place a Legal Hold on.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object that you want to place a Legal Hold on.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
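A minimal AWS SDK for Java 2.x sketch of placing a Legal Hold on an object, as described above; the bucket and key are placeholders and assume a bucket with S3 Object Lock enabled.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ObjectLockLegalHold;
import software.amazon.awssdk.services.s3.model.ObjectLockLegalHoldStatus;
import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldRequest;

public class PutLegalHoldExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Place a legal hold on a single object; bucket and key are placeholders.
            s3.putObjectLegalHold(PutObjectLegalHoldRequest.builder()
                    .bucket("my-object-lock-bucket")
                    .key("contracts/agreement.pdf")
                    .legalHold(ObjectLockLegalHold.builder()
                            .status(ObjectLockLegalHoldStatus.ON)
                            .build())
                    .build());
        }
    }
}
```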

", "location":"uri", "locationName":"Bucket" }, @@ -7804,7 +7804,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name to which the PUT operation was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name to which the PUT action was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -7882,7 +7882,7 @@ }, "Key":{ "shape":"ObjectKey", - "documentation":"

Object key for which the PUT operation was initiated.

", + "documentation":"

Object key for which the PUT action was initiated.

", "location":"uri", "locationName":"Key" }, @@ -7942,7 +7942,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a PUT operation doesn’t affect bucket-level settings for S3 Bucket Key.

", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.
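A minimal AWS SDK for Java 2.x sketch of the per-request S3 Bucket Key setting described above, uploading one object with SSE-KMS and the bucket-key header enabled; the bucket, key, and KMS key ID are placeholders.

```java
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

public class PutObjectWithBucketKey {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Request SSE-KMS for this object and opt it into an S3 Bucket Key;
            // the bucket, key, and KMS key ID are placeholders.
            s3.putObject(PutObjectRequest.builder()
                            .bucket("my-example-bucket")
                            .key("reports/2021-03.csv")
                            .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                            .ssekmsKeyId("1234abcd-12ab-34cd-56ef-1234567890ab")
                            .bucketKeyEnabled(true)
                            .build(),
                    RequestBody.fromString("example content"));
        }
    }
}
```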

", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -8003,7 +8003,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name that contains the object you want to apply this Object Retention configuration to.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name that contains the object you want to apply this Object Retention configuration to.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -8032,7 +8032,7 @@ }, "BypassGovernanceRetention":{ "shape":"BypassGovernanceRetention", - "documentation":"

Indicates whether this operation should bypass Governance-mode restrictions.

", + "documentation":"

Indicates whether this action should bypass Governance-mode restrictions.
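A minimal AWS SDK for Java 2.x sketch of bypassing Governance-mode restrictions while replacing an object's retention settings, as described above; the bucket, key, and retain-until date are placeholders, and the caller is assumed to have the s3:BypassGovernanceRetention permission.

```java
import java.time.Instant;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ObjectLockRetention;
import software.amazon.awssdk.services.s3.model.ObjectLockRetentionMode;
import software.amazon.awssdk.services.s3.model.PutObjectRetentionRequest;

public class ShortenGovernanceRetention {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Overwrite an existing governance-mode retention period; bucket, key, and date are placeholders.
            s3.putObjectRetention(PutObjectRetentionRequest.builder()
                    .bucket("my-object-lock-bucket")
                    .key("contracts/agreement.pdf")
                    .bypassGovernanceRetention(true)
                    .retention(ObjectLockRetention.builder()
                            .mode(ObjectLockRetentionMode.GOVERNANCE)
                            .retainUntilDate(Instant.parse("2021-06-01T00:00:00Z"))
                            .build())
                    .build());
        }
    }
}
```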

", "location":"header", "locationName":"x-amz-bypass-governance-retention" }, @@ -8072,7 +8072,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -8498,13 +8498,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object to restore.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object to restore.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

Object key for which the operation was initiated.

", + "documentation":"

Object key for which the action was initiated.
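A minimal AWS SDK for Java 2.x sketch of initiating the restore described above for an archived object; the bucket, key, retention days, and retrieval tier are placeholders.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GlacierJobParameters;
import software.amazon.awssdk.services.s3.model.RestoreObjectRequest;
import software.amazon.awssdk.services.s3.model.RestoreRequest;
import software.amazon.awssdk.services.s3.model.Tier;

public class RestoreArchivedObject {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Make a temporary copy of an archived object available for two days.
            s3.restoreObject(RestoreObjectRequest.builder()
                    .bucket("my-example-bucket")
                    .key("archive/2019-backup.tar")
                    .restoreRequest(RestoreRequest.builder()
                            .days(2)
                            .glacierJobParameters(GlacierJobParameters.builder().tier(Tier.STANDARD).build())
                            .build())
                    .build());
        }
    }
}
```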

", "location":"uri", "locationName":"Key" }, @@ -8586,7 +8586,7 @@ "documentation":"

Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can specify a different error code to return.

" } }, - "documentation":"

Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon Simple Storage Service User Guide.
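A minimal AWS SDK for Java 2.x sketch of the conditional redirect behavior described above, configured as part of a bucket website configuration; the bucket name and key prefixes are placeholders.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.Condition;
import software.amazon.awssdk.services.s3.model.IndexDocument;
import software.amazon.awssdk.services.s3.model.PutBucketWebsiteRequest;
import software.amazon.awssdk.services.s3.model.Redirect;
import software.amazon.awssdk.services.s3.model.RoutingRule;
import software.amazon.awssdk.services.s3.model.WebsiteConfiguration;

public class ConditionalRedirectExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Redirect requests for the old "docs/" prefix to "documents/"; names are placeholders.
            RoutingRule rule = RoutingRule.builder()
                    .condition(Condition.builder().keyPrefixEquals("docs/").build())
                    .redirect(Redirect.builder().replaceKeyPrefixWith("documents/").build())
                    .build();
            s3.putBucketWebsite(PutBucketWebsiteRequest.builder()
                    .bucket("my-website-bucket")
                    .websiteConfiguration(WebsiteConfiguration.builder()
                            .indexDocument(IndexDocument.builder().suffix("index.html").build())
                            .routingRules(rule)
                            .build())
                    .build());
        }
    }
}
```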

" }, "RoutingRules":{ "type":"list", @@ -8620,7 +8620,7 @@ }, "Transition":{ "shape":"Transition", - "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.
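A minimal AWS SDK for Java 2.x sketch of a lifecycle rule containing the transition described above; the bucket name, prefix, and 30-day delay are placeholders.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.BucketLifecycleConfiguration;
import software.amazon.awssdk.services.s3.model.ExpirationStatus;
import software.amazon.awssdk.services.s3.model.LifecycleRule;
import software.amazon.awssdk.services.s3.model.LifecycleRuleFilter;
import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationRequest;
import software.amazon.awssdk.services.s3.model.Transition;
import software.amazon.awssdk.services.s3.model.TransitionStorageClass;

public class TransitionLogsToGlacier {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Move objects under "logs/" to S3 Glacier 30 days after creation; names are placeholders.
            LifecycleRule rule = LifecycleRule.builder()
                    .id("archive-logs")
                    .status(ExpirationStatus.ENABLED)
                    .filter(LifecycleRuleFilter.builder().prefix("logs/").build())
                    .transitions(Transition.builder()
                            .days(30)
                            .storageClass(TransitionStorageClass.GLACIER)
                            .build())
                    .build();
            s3.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
                    .bucket("my-example-bucket")
                    .lifecycleConfiguration(BucketLifecycleConfiguration.builder().rules(rule).build())
                    .build());
        }
    }
}
```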

" }, "NoncurrentVersionTransition":{"shape":"NoncurrentVersionTransition"}, "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"}, @@ -8911,7 +8911,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.

For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.

For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
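A minimal AWS SDK for Java 2.x sketch of enabling the bucket-level S3 Bucket Key described above through the default encryption configuration; the bucket name and KMS key ID are placeholders.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionByDefault;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionConfiguration;
import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule;

public class EnableBucketKeyByDefault {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Default new objects to SSE-KMS with an S3 Bucket Key; bucket and KMS key are placeholders.
            ServerSideEncryptionRule rule = ServerSideEncryptionRule.builder()
                    .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                            .sseAlgorithm(ServerSideEncryption.AWS_KMS)
                            .kmsMasterKeyID("1234abcd-12ab-34cd-56ef-1234567890ab")
                            .build())
                    .bucketKeyEnabled(true)
                    .build();
            s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
                    .bucket("my-example-bucket")
                    .serverSideEncryptionConfiguration(
                            ServerSideEncryptionConfiguration.builder().rules(rule).build())
                    .build());
        }
    }
}
```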

" } }, "documentation":"

Specifies the default server-side encryption configuration.

" @@ -9149,7 +9149,7 @@ }, "Events":{ "shape":"EventList", - "documentation":"

The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service User Guide.
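A minimal AWS SDK for Java 2.x sketch of a notification configuration that sends the bucket event described above to an SNS topic; the bucket name and topic ARN are placeholders.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.Event;
import software.amazon.awssdk.services.s3.model.NotificationConfiguration;
import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationRequest;
import software.amazon.awssdk.services.s3.model.TopicConfiguration;

public class NotifyOnObjectCreated {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Publish a message to an SNS topic whenever an object is uploaded with PUT; ARNs are placeholders.
            TopicConfiguration topic = TopicConfiguration.builder()
                    .topicArn("arn:aws:sns:us-west-2:123456789012:my-topic")
                    .events(Event.S3_OBJECT_CREATED_PUT)
                    .build();
            s3.putBucketNotificationConfiguration(PutBucketNotificationConfigurationRequest.builder()
                    .bucket("my-example-bucket")
                    .notificationConfiguration(
                            NotificationConfiguration.builder().topicConfigurations(topic).build())
                    .build());
        }
    }
}
```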

", "locationName":"Event" }, "Filter":{"shape":"NotificationConfigurationFilter"} @@ -9198,7 +9198,7 @@ "documentation":"

The storage class to which you want the object to transition.

" } }, - "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.

" }, "TransitionList":{ "type":"list", @@ -9288,7 +9288,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -9463,7 +9463,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index 0a188b256313..7ef610e16b88 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3control/src/main/resources/codegen-resources/service-2.json b/services/s3control/src/main/resources/codegen-resources/service-2.json index 4b3bf0f9a301..7ec80ba17905 100644 --- a/services/s3control/src/main/resources/codegen-resources/service-2.json +++ b/services/s3control/src/main/resources/codegen-resources/service-2.json @@ -23,7 +23,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessPointResult"}, - "documentation":"

Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

Using this action with Amazon S3 on Outposts

This action:

For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide .

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to CreateAccessPoint:

", + "documentation":"

Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service User Guide.

Using this action with Amazon S3 on Outposts

This action:

For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide .

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to CreateAccessPoint:
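A minimal AWS SDK for Java 2.x sketch of CreateAccessPoint for a regular (non-Outposts) bucket, as described above; the account ID, access point name, and bucket name are placeholders.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.CreateAccessPointRequest;

public class CreateAccessPointExample {
    public static void main(String[] args) {
        try (S3ControlClient s3control = S3ControlClient.create()) {
            // Create an access point for an existing bucket; all identifiers are placeholders.
            s3control.createAccessPoint(CreateAccessPointRequest.builder()
                    .accountId("123456789012")
                    .name("my-access-point")
                    .bucket("my-example-bucket")
                    .build());
        }
    }
}
```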

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -40,7 +40,7 @@ {"shape":"BucketAlreadyExists"}, {"shape":"BucketAlreadyOwnedByYou"} ], - "documentation":"

This API operation creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.

Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

S3 on Outposts buckets do not support

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your API request, see the Examples section.

The following actions are related to CreateBucket for Amazon S3 on Outposts:

", + "documentation":"

This action creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.

Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

S3 on Outposts buckets support:

For a list of Amazon S3 features not supported by Amazon S3 on Outposts, see Unsupported Amazon S3 features.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your API request, see the Examples section.

The following actions are related to CreateBucket for Amazon S3 on Outposts:

", "httpChecksumRequired":true }, "CreateJob":{ @@ -61,7 +61,7 @@ {"shape":"IdempotencyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

You can use S3 Batch Operations to perform large-scale batch operations on Amazon S3 objects. Batch Operations can run a single operation on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

This operation creates a S3 Batch Operations job.

Related actions include:

", + "documentation":"

You can use S3 Batch Operations to perform large-scale batch actions on Amazon S3 objects. Batch Operations can run a single action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

This action creates an S3 Batch Operations job.

Related actions include:
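A minimal AWS SDK for Java 2.x sketch of creating the kind of Batch Operations job described above, here tagging every object listed in a CSV manifest; the account ID, ARNs, ETag, and tag values are placeholders.

```java
import java.util.UUID;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.CreateJobRequest;
import software.amazon.awssdk.services.s3control.model.JobManifest;
import software.amazon.awssdk.services.s3control.model.JobManifestFieldName;
import software.amazon.awssdk.services.s3control.model.JobManifestFormat;
import software.amazon.awssdk.services.s3control.model.JobManifestLocation;
import software.amazon.awssdk.services.s3control.model.JobManifestSpec;
import software.amazon.awssdk.services.s3control.model.JobOperation;
import software.amazon.awssdk.services.s3control.model.JobReport;
import software.amazon.awssdk.services.s3control.model.S3SetObjectTaggingOperation;
import software.amazon.awssdk.services.s3control.model.S3Tag;

public class CreateTaggingJob {
    public static void main(String[] args) {
        try (S3ControlClient s3control = S3ControlClient.create()) {
            // Tag every object listed in a CSV manifest; all ARNs, ETags, and IDs are placeholders.
            String jobId = s3control.createJob(CreateJobRequest.builder()
                            .accountId("123456789012")
                            .operation(JobOperation.builder()
                                    .s3PutObjectTagging(S3SetObjectTaggingOperation.builder()
                                            .tagSet(S3Tag.builder().key("department").value("marketing").build())
                                            .build())
                                    .build())
                            .manifest(JobManifest.builder()
                                    .spec(JobManifestSpec.builder()
                                            .format(JobManifestFormat.S3_BATCH_OPERATIONS_CSV_20180820)
                                            .fields(JobManifestFieldName.BUCKET, JobManifestFieldName.KEY)
                                            .build())
                                    .location(JobManifestLocation.builder()
                                            .objectArn("arn:aws:s3:::my-example-bucket/manifest.csv")
                                            .eTag("example-manifest-etag")
                                            .build())
                                    .build())
                            .report(JobReport.builder().enabled(false).build())
                            .priority(10)
                            .roleArn("arn:aws:iam::123456789012:role/my-batch-operations-role")
                            .clientRequestToken(UUID.randomUUID().toString())
                            .build())
                    .jobId();
            System.out.println("Created job " + jobId);
        }
    }
}
```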

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -97,7 +97,7 @@ "requestUri":"/v20180820/bucket/{name}" }, "input":{"shape":"DeleteBucketRequest"}, - "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.

Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

Related Resources

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.

Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

Related Resources

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -109,7 +109,7 @@ "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" }, "input":{"shape":"DeleteBucketLifecycleConfigurationRequest"}, - "documentation":"

This API action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.

Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

For more information about object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.

Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

For more information about object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -121,7 +121,7 @@ "requestUri":"/v20180820/bucket/{name}/policy" }, "input":{"shape":"DeleteBucketPolicyRequest"}, - "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.

This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this operation. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.

This implementation of the DELETE action uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this action. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -134,7 +134,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketTaggingRequest"}, - "documentation":"

This operation deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.

Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketTagging:

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.

Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -152,7 +152,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -176,7 +176,7 @@ "requestUri":"/v20180820/storagelens/{storagelensid}" }, "input":{"shape":"DeleteStorageLensConfigurationRequest"}, - "documentation":"

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -189,7 +189,7 @@ }, "input":{"shape":"DeleteStorageLensConfigurationTaggingRequest"}, "output":{"shape":"DeleteStorageLensConfigurationTaggingResult"}, - "documentation":"

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -208,7 +208,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

Related actions include:
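A minimal AWS SDK for Java 2.x sketch of retrieving the configuration and status of an existing Batch Operations job, as described above; the account ID and job ID are placeholders.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.DescribeJobRequest;
import software.amazon.awssdk.services.s3control.model.JobDescriptor;

public class CheckJobStatus {
    public static void main(String[] args) {
        try (S3ControlClient s3control = S3ControlClient.create()) {
            // Account ID and job ID are placeholders for an existing Batch Operations job.
            JobDescriptor job = s3control.describeJob(DescribeJobRequest.builder()
                            .accountId("123456789012")
                            .jobId("00e123a4-c0d8-41f4-a0eb-b46f9ba5b07c")
                            .build())
                    .job();
            System.out.println(job.jobId() + " is " + job.statusAsString());
        }
    }
}
```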

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -260,7 +260,7 @@ }, "input":{"shape":"GetBucketRequest"}, "output":{"shape":"GetBucketResult"}, - "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

", + "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified bucket and belong to the bucket owner's account in order to use this action. Only users from the Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -273,7 +273,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationResult"}, - "documentation":"

This operation gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3-outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

GetBucketLifecycleConfiguration has the following special error:

The following actions are related to GetBucketLifecycleConfiguration:

", + "documentation":"

This action gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3-outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

GetBucketLifecycleConfiguration has the following special error:

The following actions are related to GetBucketLifecycleConfiguration:
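A minimal sketch (not taken from this change) of reading those lifecycle rules through the generated client; the account ID and Outposts bucket ARN arguments are assumed placeholders.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.GetBucketLifecycleConfigurationRequest;
    import software.amazon.awssdk.services.s3control.model.LifecycleRule;

    class ShowOutpostsLifecycle {
        // Prints the lifecycle rules configured on an Outposts bucket.
        static void printRules(S3ControlClient s3Control, String accountId, String outpostsBucketArn) {
            s3Control.getBucketLifecycleConfiguration(GetBucketLifecycleConfigurationRequest.builder()
                    .accountId(accountId)
                    .bucket(outpostsBucketArn)
                    .build())
                .rules()
                .forEach((LifecycleRule rule) ->
                    System.out.println(rule.id() + " -> " + rule.statusAsString()));
        }
    }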

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -286,7 +286,7 @@ }, "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyResult"}, - "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketPolicy:

", + "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this action.

Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketPolicy:
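A hedged sketch of the corresponding SDK call (illustrative, not from this diff); the Outposts bucket ARN parameter is a placeholder.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.GetBucketPolicyRequest;

    class ShowOutpostsBucketPolicy {
        // Returns the bucket policy document (JSON text) attached to an Outposts bucket.
        static String fetchPolicy(S3ControlClient s3Control, String accountId, String outpostsBucketArn) {
            return s3Control.getBucketPolicy(GetBucketPolicyRequest.builder()
                    .accountId(accountId)
                    .bucket(outpostsBucketArn)
                    .build())
                .policy();
        }
    }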

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -299,7 +299,7 @@ }, "input":{"shape":"GetBucketTaggingRequest"}, "output":{"shape":"GetBucketTaggingResult"}, - "documentation":"

This operation gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see GetBucketTagging in the Amazon Simple Storage Service API.

Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

GetBucketTagging has the following special error:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketTagging:

", + "documentation":"

This action gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see GetBucketTagging in the Amazon Simple Storage Service API.

Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

GetBucketTagging has the following special error:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketTagging:
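For illustration only, the tag set described above can be collected into a map with the generated client; the parameters are placeholders.

    import java.util.Map;
    import java.util.stream.Collectors;
    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.GetBucketTaggingRequest;
    import software.amazon.awssdk.services.s3control.model.S3Tag;

    class ShowOutpostsBucketTags {
        // Collects the Outposts bucket's tag set into a key -> value map.
        static Map<String, String> fetchTags(S3ControlClient s3Control, String accountId, String outpostsBucketArn) {
            return s3Control.getBucketTagging(GetBucketTaggingRequest.builder()
                    .accountId(accountId)
                    .bucket(outpostsBucketArn)
                    .build())
                .tagSet()
                .stream()
                .collect(Collectors.toMap(S3Tag::key, S3Tag::value));
        }
    }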

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -317,7 +317,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Returns the tags on an S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Returns the tags on an S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.

Related actions include:
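A short illustrative sketch of reading job tags with the generated client (not part of this diff); the job ID is a placeholder.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.GetJobTaggingRequest;

    class ShowJobTags {
        // Prints the tags attached to an S3 Batch Operations job.
        static void printJobTags(S3ControlClient s3Control, String accountId, String jobId) {
            s3Control.getJobTagging(GetJobTaggingRequest.builder()
                    .accountId(accountId)
                    .jobId(jobId)
                    .build())
                .tags()
                .forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));
        }
    }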

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -346,7 +346,7 @@ }, "input":{"shape":"GetStorageLensConfigurationRequest"}, "output":{"shape":"GetStorageLensConfigurationResult"}, - "documentation":"

Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
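As a rough sketch under the usual codegen naming (not from this change), fetching a Storage Lens configuration might look like this; "my-lens-config" is a placeholder configuration ID.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.GetStorageLensConfigurationRequest;
    import software.amazon.awssdk.services.s3control.model.StorageLensConfiguration;

    class ShowStorageLensConfig {
        // Fetches one Storage Lens configuration by its configuration ID.
        static StorageLensConfiguration fetch(S3ControlClient s3Control, String accountId) {
            return s3Control.getStorageLensConfiguration(GetStorageLensConfigurationRequest.builder()
                    .accountId(accountId)
                    .configId("my-lens-config")
                    .build())
                .storageLensConfiguration();
        }
    }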

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -359,7 +359,7 @@ }, "input":{"shape":"GetStorageLensConfigurationTaggingRequest"}, "output":{"shape":"GetStorageLensConfigurationTaggingResult"}, - "documentation":"

Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -390,7 +390,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

Related actions include:
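Illustrative only: listing jobs through the SDK's sync paginator, which follows NextToken across pages; the status filter shown is an arbitrary example.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.JobStatus;
    import software.amazon.awssdk.services.s3control.model.ListJobsRequest;

    class ListRecentBatchJobs {
        // Lists Active and Complete Batch Operations jobs for the account.
        static void printJobs(S3ControlClient s3Control, String accountId) {
            s3Control.listJobsPaginator(ListJobsRequest.builder()
                    .accountId(accountId)
                    .jobStatuses(JobStatus.ACTIVE, JobStatus.COMPLETE)
                    .build())
                .stream()
                .flatMap(page -> page.jobs().stream())
                .forEach(job -> System.out.println(job.jobId() + " " + job.statusAsString()));
        }
    }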

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -403,7 +403,7 @@ }, "input":{"shape":"ListRegionalBucketsRequest"}, "output":{"shape":"ListRegionalBucketsResult"}, - "documentation":"

Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the Examples section.

", + "documentation":"

Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the Examples section.
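A minimal sketch (not from this diff) of listing Outposts buckets via the generated client; the Outpost ID is a placeholder.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.ListRegionalBucketsRequest;

    class ListOutpostsBuckets {
        // Lists the Outposts buckets owned by the caller on one Outpost.
        static void printBuckets(S3ControlClient s3Control, String accountId) {
            s3Control.listRegionalBuckets(ListRegionalBucketsRequest.builder()
                    .accountId(accountId)
                    .outpostId("op-01ac5d28a6a232904")
                    .build())
                .regionalBucketList()
                .forEach(b -> System.out.println(b.bucket() + " -> " + b.bucketArn()));
        }
    }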

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -416,7 +416,7 @@ }, "input":{"shape":"ListStorageLensConfigurationsRequest"}, "output":{"shape":"ListStorageLensConfigurationsResult"}, - "documentation":"

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -461,7 +461,7 @@ "locationName":"PutBucketPolicyRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.

Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this operation.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketPolicy:

", + "documentation":"

This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.

Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this action.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketPolicy:
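Illustrative sketch only: applying a policy document through the generated client; the policy JSON is supplied by the caller and the ARN is a placeholder.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.PutBucketPolicyRequest;

    class ApplyOutpostsBucketPolicy {
        // Applies a bucket policy (JSON text) to an Outposts bucket.
        static void apply(S3ControlClient s3Control, String accountId, String outpostsBucketArn, String policyJson) {
            s3Control.putBucketPolicy(PutBucketPolicyRequest.builder()
                    .accountId(accountId)
                    .bucket(outpostsBucketArn)
                    .policy(policyJson)
                    .build());
        }
    }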

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -474,7 +474,7 @@ "requestUri":"/v20180820/bucket/{name}/tagging" }, "input":{"shape":"PutBucketTaggingRequest"}, - "documentation":"

This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.

Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3-outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketTagging:

", + "documentation":"

This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.

Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this action, you must have permissions to perform the s3-outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketTagging:
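A hedged sketch (not from this change) of setting the tag set described above; the tag key/value and the bucket ARN are placeholders, and note that the call replaces the existing tag set.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.PutBucketTaggingRequest;
    import software.amazon.awssdk.services.s3control.model.S3Tag;
    import software.amazon.awssdk.services.s3control.model.Tagging;

    class TagOutpostsBucket {
        // Replaces the Outposts bucket's tag set with a single cost-allocation tag.
        static void tag(S3ControlClient s3Control, String accountId, String outpostsBucketArn) {
            s3Control.putBucketTagging(PutBucketTaggingRequest.builder()
                    .accountId(accountId)
                    .bucket(outpostsBucketArn)
                    .tagging(Tagging.builder()
                            .tagSet(S3Tag.builder().key("project").value("alpha").build())
                            .build())
                    .build());
        }
    }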

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -498,7 +498,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

", + "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:PutJobTagging action.

Related actions include:
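For illustration (placeholders throughout), replacing a job's whole tag set as the documentation above describes:

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.PutJobTaggingRequest;
    import software.amazon.awssdk.services.s3control.model.S3Tag;

    class ReplaceJobTags {
        // Replaces the entire tag set on a Batch Operations job.
        static void replaceTags(S3ControlClient s3Control, String accountId, String jobId) {
            s3Control.putJobTagging(PutJobTaggingRequest.builder()
                    .accountId(accountId)
                    .jobId(jobId)
                    .tags(S3Tag.builder().key("department").value("archive").build())
                    .build());
        }
    }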

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -526,7 +526,7 @@ "locationName":"PutStorageLensConfigurationRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -543,7 +543,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"PutStorageLensConfigurationTaggingResult"}, - "documentation":"

Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -562,7 +562,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

Related actions include:
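A brief illustrative sketch of the priority update; the job ID and priority value are placeholders.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.UpdateJobPriorityRequest;

    class BumpJobPriority {
        // Raises a Batch Operations job's priority; higher numbers are scheduled first.
        static void bump(S3ControlClient s3Control, String accountId, String jobId) {
            s3Control.updateJobPriority(UpdateJobPriorityRequest.builder()
                    .accountId(accountId)
                    .jobId(jobId)
                    .priority(42)
                    .build());
        }
    }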

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -582,7 +582,7 @@ {"shape":"JobStatusException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Updates the status for the specified job. Use this action to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

Related actions include:
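Illustrative only: confirming a job that is awaiting confirmation, as the documentation above describes; use RequestedJobStatus.CANCELLED to cancel instead. The job ID and reason text are placeholders.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.RequestedJobStatus;
    import software.amazon.awssdk.services.s3control.model.UpdateJobStatusRequest;

    class ConfirmBatchJob {
        // Moves a job into the Ready state so it can run.
        static void confirm(S3ControlClient s3Control, String accountId, String jobId) {
            s3Control.updateJobStatus(UpdateJobStatusRequest.builder()
                    .accountId(accountId)
                    .jobId(jobId)
                    .requestedJobStatus(RequestedJobStatus.READY)
                    .statusUpdateReason("Confirmed after review")
                    .build());
        }
    }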

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -920,7 +920,7 @@ }, "Operation":{ "shape":"JobOperation", - "documentation":"

The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The action that you want this job to perform on every object listed in the manifest. For more information about the available actions, see Operations in the Amazon Simple Storage Service User Guide.

" }, "Report":{ "shape":"JobReport", @@ -946,7 +946,7 @@ }, "RoleArn":{ "shape":"IAMRoleArn", - "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's operation on every object in the manifest.

" + "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's action on every object in the manifest.
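The Operation and RoleArn members described here come together in CreateJob. The following is a heavily abbreviated, hypothetical sketch only, assuming the usual codegen class and enum names; every ARN, the manifest ETag, and the tag values are placeholders, not real resources.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.*;

    class StartTaggingJob {
        // Submits a Batch Operations job that applies one tag to every object in a CSV manifest.
        // All ARNs, the manifest ETag, and the tag values are placeholders.
        static String start(S3ControlClient s3Control, String accountId) {
            CreateJobResponse created = s3Control.createJob(CreateJobRequest.builder()
                    .accountId(accountId)
                    .clientRequestToken(java.util.UUID.randomUUID().toString())
                    .priority(10)
                    .confirmationRequired(false)
                    .roleArn("arn:aws:iam::123456789012:role/batch-operations-role")
                    .operation(JobOperation.builder()
                            .s3PutObjectTagging(S3SetObjectTaggingOperation.builder()
                                    .tagSet(S3Tag.builder().key("retagged").value("true").build())
                                    .build())
                            .build())
                    .manifest(JobManifest.builder()
                            .spec(JobManifestSpec.builder()
                                    .format(JobManifestFormat.S3_BATCH_OPERATIONS_CSV_20180820)
                                    .fields(JobManifestFieldName.BUCKET, JobManifestFieldName.KEY)
                                    .build())
                            .location(JobManifestLocation.builder()
                                    .objectArn("arn:aws:s3:::example-manifests/manifest.csv")
                                    .eTag("60e460c9d1046e73f7dde5043ac3ae85")
                                    .build())
                            .build())
                    .report(JobReport.builder()
                            .enabled(true)
                            .bucket("arn:aws:s3:::example-reports")
                            .prefix("batch-reports")
                            .format(JobReportFormat.REPORT_CSV_20180820)
                            .reportScope(JobReportScope.FAILED_TASKS_ONLY)
                            .build())
                    .build());
            return created.jobId();
        }
    }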

" }, "Tags":{ "shape":"S3TagSet", @@ -1977,7 +1977,7 @@ "box":true } }, - "documentation":"

The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service User Guide.

" }, "JobPriority":{ "type":"integer", @@ -2516,7 +2516,7 @@ "locationName":"IsPublic" } }, - "documentation":"

Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service User Guide.

" }, "Prefix":{"type":"string"}, "PrefixLevel":{ @@ -2591,7 +2591,7 @@ }, "Policy":{ "shape":"Policy", - "documentation":"

The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service User Guide.
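As a rough sketch (not from this diff), the Policy member described here is supplied to PutAccessPointPolicy like so; "example-ap" is a placeholder access point name and the policy JSON comes from the caller.

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.PutAccessPointPolicyRequest;

    class ApplyAccessPointPolicy {
        // Attaches a policy document (JSON text) to an access point.
        static void apply(S3ControlClient s3Control, String accountId, String policyJson) {
            s3Control.putAccessPointPolicy(PutAccessPointPolicyRequest.builder()
                    .accountId(accountId)
                    .name("example-ap")
                    .policy(policyJson)
                    .build());
        }
    }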

" } } }, @@ -3253,7 +3253,7 @@ "documentation":"

The Object Lock retention mode to be applied to all objects in the Batch Operations job.

" } }, - "documentation":"

Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode and RetainUntilDate data types in your operation, you will remove the retention from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode and RetainUntilDate data types in your operation, you will remove the retention from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.

" }, "S3SSEAlgorithm":{ "type":"string", @@ -3281,7 +3281,7 @@ "documentation":"

Contains the Object Lock legal hold status to be applied to all objects in the Batch Operations job.

" } }, - "documentation":"

Contains the configuration for an S3 Object Lock legal hold operation that an S3 Batch Operations job passes every object to the underlying PutObjectLegalHold API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Contains the configuration for an S3 Object Lock legal hold operation that an S3 Batch Operations job passes every object to the underlying PutObjectLegalHold API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service User Guide.

" }, "S3SetObjectRetentionOperation":{ "type":"structure", @@ -3294,10 +3294,10 @@ }, "Retention":{ "shape":"S3Retention", - "documentation":"

Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.

" } }, - "documentation":"

Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes every object to the underlying PutObjectRetention API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes every object to the underlying PutObjectRetention API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.

" }, "S3SetObjectTaggingOperation":{ "type":"structure", @@ -3532,12 +3532,12 @@ "type":"string", "max":1024, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*)$" }, "TagValueString":{ "type":"string", "max":1024, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*)$" }, "Tagging":{ "type":"structure", @@ -3583,7 +3583,7 @@ "documentation":"

The storage class to which you want the object to transition.

" } }, - "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.

" }, "TransitionList":{ "type":"list", @@ -3717,5 +3717,5 @@ "min":1 } }, - "documentation":"

AWS S3 Control provides access to Amazon S3 control plane operations.

" + "documentation":"

AWS S3 Control provides access to Amazon S3 control plane actions.

" } diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index 7559acd97ab6..29c3d0ce064c 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index 64f60de6fb52..713d1d27d00d 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 sagemaker diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index 2e098bf0ff74..a98078ffee77 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index 193943999df0..4991b13af091 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index f696c041598e..2d8dd6200d38 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store Runtime diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index fd38f5c6320e..ef7f3a218ddd 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 0131bd457252..68ce8ba57934 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 514c36500f9b..a2560ca3a5f8 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index 5423f6fa60e4..b5fbdcdc4dbb 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index 6f9af97cc284..ec6f9050eafb 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index 0899c85404fe..27615dd4418f 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 
serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index c8b8c8c6021b..c52b06a25322 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index 3ae2885b75ff..1ff4dd3cd3f3 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 0417051c98a8..ed89010b39e2 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index f84ff5474cd6..775fa964d231 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/ses/pom.xml b/services/ses/pom.xml index e173c0989e90..814ef445e92d 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index 0bad6efc6576..5e639e7c3f06 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 1c7425a60cef..e4060f9cdf35 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index 4034f292d5e9..26fba6d57e38 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/signer/pom.xml b/services/signer/pom.xml index 61b90a54f60f..7c8340526e49 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 signer AWS Java SDK :: Services :: Signer diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 63883845398e..d502de590e4f 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index ad48184e068e..5465f64dbd28 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 9f423620f778..6b87427aa066 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sns AWS Java SDK :: Services :: Amazon 
SNS diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index ddec840c6779..c20adc5c70f6 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 37399fa62f65..f39259aabd51 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/sso/pom.xml b/services/sso/pom.xml index db322278bdd7..be22d1668848 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sso AWS Java SDK :: Services :: SSO diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index 52a01db2bdeb..ee102344f31b 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 58db3335ff73..854e5c6e2209 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index 1f7fe64147d1..f664188d6cbe 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/sts/pom.xml b/services/sts/pom.xml index 4af822a79756..1db55b37c1a2 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 sts AWS Java SDK :: Services :: AWS STS diff --git a/services/support/pom.xml b/services/support/pom.xml index e3a434da448e..230cb76e08f5 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 support AWS Java SDK :: Services :: AWS Support diff --git a/services/swf/pom.xml b/services/swf/pom.xml index 3dccc443e84d..46b0799fde59 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index fd9410d81e1a..41ee2201bb15 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 0a8d21d1920b..ca7c21d94412 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 textract AWS Java SDK :: Services :: Textract diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index f9342ef8e977..7aff9a691898 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 2979fb76af76..a41217881a5d 
100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 11cf1e1bfc49..28cd54411dec 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index 9f465f71c363..92ce33529e9c 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index 611bfbc9ecc0..ff5847ed7c3a 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 transfer AWS Java SDK :: Services :: Transfer diff --git a/services/translate/pom.xml b/services/translate/pom.xml index bb2bd673b58e..5dcf7fe1a724 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 translate diff --git a/services/waf/pom.xml b/services/waf/pom.xml index 22668f90a105..43d687330067 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index f350905961f2..878b9d94e942 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index 78104ea06909..f17e85c60e64 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index c38187e69d46..8b43b98e22cc 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index 8a5c3114b1e0..a9c86ab410f1 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index be55f38e8004..2f77080ae800 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0 workmail diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index b053c8607408..5cfbfffe3f5a 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 
4356938f33f9..8d24e1356bdc 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/xray/pom.xml b/services/xray/pom.xml index 1ed071c93c25..bcda9412bf36 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.13 + 2.16.14 xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index faedc9f99a25..0dda27e7ece8 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 ../../pom.xml diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index 3d62aaccc6a0..814e242c944b 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index 244d759cd2fc..5805378b608b 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index ca6cba5dbdb9..5288a791cf1d 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 6ee36eee7424..e79b7224fc63 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 ../../pom.xml 4.0.0 diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index fe7377c3dda5..2d3bb52891db 100755 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 ../../pom.xml diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index 61fd7be45687..b1290b9be887 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 ../../pom.xml 4.0.0 diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index d6c0b2202357..51b9f2af5cf8 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index 04269c1e119f..f0e91aa001e1 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 ../../pom.xml 4.0.0 diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index 726a4c6c220d..3c431c3323f0 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.13 + 2.16.14 ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 3330ea672032..5272d4052d21 
100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 ../../pom.xml 4.0.0 diff --git a/utils/pom.xml b/utils/pom.xml index 627cbe1e2e09..da0ad269710b 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.13 + 2.16.14 4.0.0