diff --git a/.brazil.json b/.brazil.json new file mode 100644 index 000000000000..44b8f2b36d80 --- /dev/null +++ b/.brazil.json @@ -0,0 +1,117 @@ +{ + "packageVersion": "2.0", + + "modules": { + "annotations": { "packageName": "AwsJavaSdk-Core-Annotations" }, + "apache-client": { "packageName": "AwsJavaSdk-HttpClient-ApacheClient" }, + "arns": { "packageName": "AwsJavaSdk-Core-Arns" }, + "auth": { "packageName": "AwsJavaSdk-Core-Auth" }, + "auth-crt": { "packageName": "AwsJavaSdk-Core-AuthCrt" }, + "aws-cbor-protocol": { "packageName": "AwsJavaSdk-Core-AwsCborProtocol" }, + "aws-core": { "packageName": "AwsJavaSdk-Core-AwsCore" }, + "aws-crt-client": { "packageName": "AwsJavaSdk-HttpClient-CrtClient" }, + "aws-ion-protocol": { "packageName": "AwsJavaSdk-Core-AwsIonProtocol" }, + "aws-json-protocol": { "packageName": "AwsJavaSdk-Core-AwsJsonProtocol" }, + "aws-query-protocol": { "packageName": "AwsJavaSdk-Core-AwsQueryProtocol" }, + "aws-xml-protocol": { "packageName": "AwsJavaSdk-Core-AwsXmlProtocol" }, + "cloudwatch-metric-publisher": { "packageName": "AwsJavaSdk-MetricPublisher-CloudWatch" }, + "codegen": { "packageName": "AwsJavaSdk-Codegen" }, + "dynamodb-enhanced": { "packageName": "AwsJavaSdk-DynamoDb-Enhanced" }, + "http-client-spi": { "packageName": "AwsJavaSdk-HttpClient" }, + "json-utils": { "packageName": "AwsJavaSdk-Core-JsonUtils" }, + "metrics-spi": { "packageName": "AwsJavaSdk-Core-MetricsSpi" }, + "netty-nio-client": { "packageName": "AwsJavaSdk-HttpClient-NettyNioClient" }, + "profiles": { "packageName": "AwsJavaSdk-Core-Profiles" }, + "protocol-core": { "packageName": "AwsJavaSdk-Core-ProtocolCore" }, + "regions": { "packageName": "AwsJavaSdk-Core-Regions" }, + "s3-transfer-manager": { "packageName": "AwsJavaSdk-S3-TransferManager" }, + "sdk-core": { "packageName": "AwsJavaSdk-Core" }, + "url-connection-client": { "packageName": "AwsJavaSdk-HttpClient-UrlConnectionClient" }, + "utils": { "packageName": "AwsJavaSdk-Core-Utils" }, + + "dynamodb": { 
"packageName": "AwsJavaSdk-DynamoDb" }, + "waf": { "packageName": "AwsJavaSdk-Waf" }, + + "third-party-jackson-core": { + "packageName": "AwsJavaSdk-ThirdParty-JacksonCore", + "artifactType": "JAR", + "includes": ["target/aws-sdk-java-third-party-jackson-core-*.jar"] + }, + "third-party-jackson-dataformat-cbor": { + "packageName": "AwsJavaSdk-ThirdParty-JacksonDataformatCbor", + "artifactType": "JAR", + "includes": ["target/aws-sdk-java-third-party-jackson-dataformat-cbor-*.jar"] + }, + + "archetype-app-quickstart": { "skipImport": true }, + "archetype-lambda": { "skipImport": true }, + "archetype-tools": { "skipImport": true }, + "archetypes": { "skipImport": true }, + "auth-sts-testing": { "skipImport": true }, + "aws-sdk-java": { "skipImport": true }, + "aws-sdk-java-pom": { "skipImport": true }, + "bom": { "skipImport": true }, + "bom-internal": { "skipImport": true }, + "build-tools": { "skipImport": true }, + "bundle": { "skipImport": true }, + "codegen-generated-classes-test": { "skipImport": true }, + "codegen-lite": { "skipImport": true }, + "codegen-lite-maven-plugin": { "skipImport": true }, + "codegen-maven-plugin": { "skipImport": true }, + "core": { "skipImport": true }, + "dynamodbdocument-v1": { "skipImport": true }, + "dynamodbmapper-v1": { "skipImport": true }, + "http-client-tests": { "skipImport": true }, + "http-clients": { "skipImport": true }, + "metric-publishers": { "skipImport": true }, + "module-path-tests": { "skipImport": true }, + "protocol-tests": { "skipImport": true }, + "protocol-tests-core": { "skipImport": true }, + "protocols": { "skipImport": true }, + "release-scripts": { "skipImport": true }, + "s3-benchmarks": { "skipImport": true }, + "sdk-benchmarks": { "skipImport": true }, + "sdk-native-image-test": { "skipImport": true }, + "service-test-utils": { "skipImport": true }, + "services": { "skipImport": true }, + "services-custom": { "skipImport": true }, + "stability-tests": { "skipImport": true }, + "test-utils": { 
"skipImport": true }, + "tests-coverage-reporting": { "skipImport": true }, + "third-party": { "skipImport": true } + }, + + "dependencies": { + "com.fasterxml.jackson.core:jackson-annotations": { "packageName": "Jackson-annotations", "packageVersion": "2.9.x" }, + "com.fasterxml.jackson.core:jackson-core": { "packageName": "Jackson-core", "packageVersion": "2.9.x" }, + "com.fasterxml.jackson.core:jackson-databind": { "packageName": "Jackson-databind", "packageVersion": "2.9.x" }, + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { "packageName": "Jackson-dataformat-cbor", "packageVersion": "2.9.x" }, + "com.fasterxml.jackson.jr:jackson-jr-objects": { "packageName": "Maven-com-fasterxml-jackson-jr_jackson-jr-objects", "packageVersion": "2.11.x" }, + "com.fasterxml.jackson.jr:jackson-jr-stree": { "packageName": "Maven-com-fasterxml-jackson-jr_jackson-jr-stree", "packageVersion": "2.11.x" }, + "com.jayway.jsonpath:json-path": { "packageName": "JMESPathJava", "packageVersion": "1.0" }, + "com.squareup:javapoet": { "packageName": "Maven-com-squareup_javapoet", "packageVersion": "1.11.x" }, + "com.typesafe.netty:netty-reactive-streams": { "packageName": "Netty4", "packageVersion": "4.1" }, + "com.typesafe.netty:netty-reactive-streams-http": { "packageName": "Maven-com-typesafe-netty_netty-reactive-streams-http", "packageVersion": "2.x" }, + "commons-codec:commons-codec": { "packageName": "JakartaCommons-codec", "packageVersion": "1.x" }, + "io.netty:netty-buffer": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-codec": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-codec-http": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-codec-http2": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-common": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-handler": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-resolver": { 
"packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-transport": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-transport-native-epoll": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-transport-native-unix-common": { "packageName": "Netty4", "packageVersion": "4.1" }, + "org.apache.httpcomponents:httpclient": { "packageName": "Apache-HttpComponents-HttpClient", "packageVersion": "4.5.x" }, + "org.apache.httpcomponents:httpcore": { "packageName": "Apache-HttpComponents-HttpCore", "packageVersion": "4.4.x" }, + "org.eclipse.jdt:org.eclipse.jdt.core": { "packageName": "AwsJavaSdk-Codegen-EclipseJdtDependencies", "packageVersion": "2.0" }, + "org.eclipse.text:org.eclipse.text": { "packageName": "AwsJavaSdk-Codegen-EclipseJdtDependencies", "packageVersion": "2.0" }, + "org.reactivestreams:reactive-streams": { "packageName": "Maven-org-reactivestreams_reactive-streams", "packageVersion": "1.x" }, + "org.slf4j:slf4j-api": { "packageName": "Slf4j", "packageVersion": "1.7" }, + "software.amazon.awssdk.crt:aws-crt": { "packageName": "Aws-crt-java", "packageVersion": "1.0.x" }, + "software.amazon.eventstream:eventstream": { "packageName": "AwsFlowJava", "packageVersion": "1.0" }, + "software.amazon.ion:ion-java": { "packageName": "Maven-software-amazon-ion_ion-java", "packageVersion": "1.x" }, + "software.amazon:flow": { "packageName": "AwsFlowJava", "packageVersion": "1.0" } + } +} diff --git a/.changes/2.16.101.json b/.changes/2.16.101.json new file mode 100644 index 000000000000..b9a7edba402b --- /dev/null +++ b/.changes/2.16.101.json @@ -0,0 +1,54 @@ +{ + "version": "2.16.101", + "date": "2021-07-13", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "Changes to OpsCenter APIs to support a new feature, operational insights." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "contributor": "", + "description": "Customers can now migrate bots built with Lex V1 APIs to V2 APIs. This release adds APIs to initiate and manage the migration of a bot." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "contributor": "", + "description": "Release new APIs to support new Redshift feature - Authentication Profile" + }, + { + "type": "feature", + "category": "AmplifyBackend", + "contributor": "", + "description": "Added Sign in with Apple OAuth provider." + }, + { + "type": "feature", + "category": "AWS Direct Connect", + "contributor": "", + "description": "This release adds a new field named awsLogicalDeviceId that displays the AWS Direct Connect endpoint which terminates a physical connection's BGP Sessions." + }, + { + "type": "feature", + "category": "AWS Price List Service", + "contributor": "", + "description": "Documentation updates for api.pricing" + }, + { + "type": "feature", + "category": "Amazon DevOps Guru", + "contributor": "", + "description": "Add paginator for GetCostEstimation" + } + ] +} \ No newline at end of file diff --git a/.changes/2.16.102.json b/.changes/2.16.102.json new file mode 100644 index 000000000000..5b0470b6c47e --- /dev/null +++ b/.changes/2.16.102.json @@ -0,0 +1,48 @@ +{ + "version": "2.16.102", + "date": "2021-07-14", + "entries": [ + { + "type": "feature", + "category": "AWS Certificate Manager", + "contributor": "", + "description": "Added support for RSA 3072 SSL certificate import" + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "contributor": "", + "description": "This release adds support for the Amazon Lightsail object storage service, which allows you to create buckets and store objects."
+ }, + { + "type": "feature", + "category": "Amazon HealthLake", + "contributor": "", + "description": "General availability for Amazon HealthLake. StartFHIRImportJob and StartFHIRExportJob APIs now require AWS KMS parameter. For more information, see the Amazon HealthLake Documentation https://docs.aws.amazon.com/healthlake/index.html." + }, + { + "type": "documentation", + "category": "AWS SDK for Java v2", + "contributor": "Bennett-Lynch", + "description": "Update pull request template to place motivation before description" + }, + { + "type": "feature", + "category": "AWS Well-Architected Tool", + "contributor": "", + "description": "This update provides support for Well-Architected API users to mark answer choices as not applicable." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Add support for Event Driven Workflows" + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Release of feature needed for ECA-Endpoint settings. This allows customer to delete a field in endpoint settings by using --exact-settings flag in modify-endpoint api. This also displays default values for certain required fields of endpoint settings in describe-endpoint-settings api." + } + ] +} \ No newline at end of file diff --git a/.changes/2.16.103.json b/.changes/2.16.103.json new file mode 100644 index 000000000000..d7d7883efd64 --- /dev/null +++ b/.changes/2.16.103.json @@ -0,0 +1,42 @@ +{ + "version": "2.16.103", + "date": "2021-07-15", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This feature enables customers to specify weekly recurring time window(s) for scheduled events that reboot, stop or terminate EC2 instances." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add support for marshalling lists of strings in HTTP headers" + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "Documentation updates for cognito-idp" + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "contributor": "", + "description": "Lex now supports the en-IN locale" + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Documentation updates for support of awsvpc mode on Windows." + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "Update the default endpoint for the APIs used to manage asset models, assets, gateways, tags, and account configurations. If you have firewalls with strict egress rules, configure the rules to grant you access to api.iotsitewise.[region].amazonaws.com or api.iotsitewise.[cn-region].amazonaws.com.cn." + } + ] +} \ No newline at end of file diff --git a/.changes/2.16.104.json b/.changes/2.16.104.json new file mode 100644 index 000000000000..4cb72b02f41d --- /dev/null +++ b/.changes/2.16.104.json @@ -0,0 +1,36 @@ +{ + "version": "2.16.104", + "date": "2021-07-16", + "entries": [ + { + "type": "feature", + "category": "AWS Audit Manager", + "contributor": "", + "description": "This release relaxes the S3 URL character restrictions in AWS Audit Manager. Regex patterns have been updated for the following attributes: s3RelativePath, destination, and s3ResourcePath. 'AWS' terms have also been replaced with entities to align with China Rebrand documentation efforts." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "Amazon Chime", + "contributor": "", + "description": "This SDK release adds Account Status as one of the attributes in Account API response" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "This change implements support for the `AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE` environment variable and\n`ec2_metadata_service_endpoint` profile file property.\n\nWhen no endpoint override is set using `AWS_EC2_METADATA_SERVICE_ENDPOINT`, this configuration controls which of the default\nIMDS endpoints the client will use. Valid values are `IPv4` or `IPv6`" + }, + { + "type": "feature", + "category": "Amazon AppIntegrations Service", + "contributor": "", + "description": "Documentation update for AppIntegrations Service" + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.0.json b/.changes/2.17.0.json new file mode 100644 index 000000000000..9ccbdb7a1b3d --- /dev/null +++ b/.changes/2.17.0.json @@ -0,0 +1,48 @@ +{ + "version": "2.17.0", + "date": "2021-07-19", + "entries": [ + { + "type": "feature", + "category": "AWS RoboMaker", + "contributor": "", + "description": "This release allows customers to create a new version of WorldTemplates with support for Doors." + }, + { + "type": "feature", + "category": "Amazon Location Service", + "contributor": "", + "description": "Add five new API operations: UpdateGeofenceCollection, UpdateMap, UpdatePlaceIndex, UpdateRouteCalculator, UpdateTracker." + }, + { + "type": "feature", + "category": "Amazon EMR Containers", + "contributor": "", + "description": "Updated DescribeManagedEndpoint and ListManagedEndpoints to return failureReason and stateDetails in API response."
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Moved Jackson from an external SDK dependency to an internal dependency: https://aws.amazon.com/blogs/developer/the-aws-sdk-for-java-2-17-removes-its-external-dependency-on-jackson/" + }, + { + "type": "feature", + "category": "AWS Health APIs and Notifications", + "contributor": "", + "description": "In the Health API, the maximum number of entities for the EventFilter and EntityFilter data types has changed from 100 to 99. This change is related to an internal optimization of the AWS Health service." + }, + { + "type": "feature", + "category": "AWS Direct Connect", + "contributor": "", + "description": "Documentation updates for directconnect" + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "contributor": "", + "description": "Documentation updates for reversal of default value for additional instance configuration SSM switch, plus improved descriptions for semantic versioning." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.17.1.json b/.changes/2.17.1.json new file mode 100644 index 000000000000..2439f8bf4674 --- /dev/null +++ b/.changes/2.17.1.json @@ -0,0 +1,18 @@ +{ + "version": "2.17.1", + "date": "2021-07-20", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Added idempotency to the CreateVolume API using the ClientToken request parameter" + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "Documentation updates for Compute Optimizer" + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.10.json b/.changes/2.17.10.json new file mode 100644 index 000000000000..a526ef27abc9 --- /dev/null +++ b/.changes/2.17.10.json @@ -0,0 +1,30 @@ +{ + "version": "2.17.10", + "date": "2021-08-02", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Don't attempt to use empty session tokens" + }, + { + "type": "feature", + "category": "AWS IoT Greengrass V2", + "contributor": "", + "description": "This release adds support for component system resource limits and idempotent Create operations. You can now specify the maximum amount of CPU and memory resources that each component can use." + }, + { + "type": "bugfix", + "category": "AWS Savings Plans", + "contributor": "", + "description": "Setting default content type to 'application/json' since the service does not accept 'x-amz-json-'" + }, + { + "type": "feature", + "category": "AWS Systems Manager Incident Manager Contacts", + "contributor": "", + "description": "Added new attribute in AcceptCode API. AcceptCodeValidation takes in two values - ENFORCE, IGNORE. 
ENFORCE forces validation of accept code and IGNORE ignores it which is also the default behavior; Corrected TagKeyList length from 200 to 50" + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.11.json b/.changes/2.17.11.json new file mode 100644 index 000000000000..422083e4ccc2 --- /dev/null +++ b/.changes/2.17.11.json @@ -0,0 +1,42 @@ +{ + "version": "2.17.11", + "date": "2021-08-03", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "My AWS Service (placeholder) - This release introduces custom Intervals and offset for tumbling window in metric for AWS IoT SiteWise." + }, + { + "type": "feature", + "category": "AWS Proton", + "contributor": "", + "description": "Docs only add idempotent create apis" + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Add ConcurrentModificationException to create-table, delete-table, create-database, update-database, delete-database" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "contributor": "", + "description": "API support for Redshift Data Sharing feature." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add IntelliJ .ipr files to gitignore" + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "AWS Elemental MediaConvert SDK has added control over the passthrough of XDS captions metadata to outputs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.12.json b/.changes/2.17.12.json new file mode 100644 index 000000000000..954d53f0de0a --- /dev/null +++ b/.changes/2.17.12.json @@ -0,0 +1,54 @@ +{ + "version": "2.17.12", + "date": "2021-08-04", + "entries": [ + { + "type": "feature", + "category": "EC2 Image Builder", + "contributor": "", + "description": "Updated list actions to include a list of valid filters that can be used in the request." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "contributor": "", + "description": "This release adds support for call analytics (batch) within Amazon Transcribe." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add customization.config support for setting default RetryMode" + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "Documentation updates to add EC2 Image Builder as a target on PutTargets." + }, + { + "type": "feature", + "category": "AWS Systems Manager Incident Manager", + "contributor": "", + "description": "Documentation updates for Incident Manager." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release adds AutomaticRestartTime to the DescribeDBInstances and DescribeDBClusters operations. AutomaticRestartTime indicates the time when a stopped DB instance or DB cluster is restarted automatically." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed an issue where a bean-based marshallers for request and response models would fail for fluent setters that happened to start with \"set\", like \"settings\"." + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.13.json b/.changes/2.17.13.json new file mode 100644 index 000000000000..143dacba0266 --- /dev/null +++ b/.changes/2.17.13.json @@ -0,0 +1,18 @@ +{ + "version": "2.17.13", + "date": "2021-08-05", + "entries": [ + { + "type": "feature", + "category": "Amazon Lex Model Building V2", + "contributor": "", + "description": "Customers can now toggle the active field on prompts and responses." 
+ }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "EC2 Auto Scaling adds configuration checks and Launch Template validation to Instance Refresh." + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.14.json b/.changes/2.17.14.json new file mode 100644 index 000000000000..2c63a5a1eb51 --- /dev/null +++ b/.changes/2.17.14.json @@ -0,0 +1,54 @@ +{ + "version": "2.17.14", + "date": "2021-08-06", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Athena", + "contributor": "", + "description": "Documentation updates for Athena." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "contributor": "", + "description": "This release adds support to track when a bucket access key was last used." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "contributor": "", + "description": "Fixed an issue where checksum validation only considered the first 4 bytes of the 16 byte checksum, creating the potential for corrupted downloads to go undetected." + }, + { + "type": "feature", + "category": "Amazon Chime SDK Messaging", + "contributor": "", + "description": "The Amazon Chime SDK Messaging APIs allow software developers to send and receive messages in custom messaging applications." + }, + { + "type": "feature", + "category": "Amazon Chime SDK Identity", + "contributor": "", + "description": "The Amazon Chime SDK Identity APIs allow software developers to create and manage unique instances of their messaging applications." + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "Documentation updates for Visual Monitoring feature and other doc ticket fixes." 
+ }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds support for agent status and hours of operation. For details, see the Release Notes in the Amazon Connect Administrator Guide." + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.15.json b/.changes/2.17.15.json new file mode 100644 index 000000000000..365fca44f840 --- /dev/null +++ b/.changes/2.17.15.json @@ -0,0 +1,24 @@ +{ + "version": "2.17.15", + "date": "2021-08-09", + "entries": [ + { + "type": "feature", + "category": "Amazon Rekognition", + "contributor": "", + "description": "This release adds support for four new types of segments (opening credits, content segments, slates, and studio logos), improved accuracy for credits and shot detection and new filters to control black frame detection." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "Documentation updates for AWS Systems Manager." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "This release adds APIs to support versioning feature of AWS WAF Managed rule groups" + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.2.json b/.changes/2.17.2.json new file mode 100644 index 000000000000..c7b8a3c72d19 --- /dev/null +++ b/.changes/2.17.2.json @@ -0,0 +1,60 @@ +{ + "version": "2.17.2", + "date": "2021-07-21", + "entries": [ + { + "type": "feature", + "category": "AWS Identity and Access Management", + "contributor": "", + "description": "Documentation updates for AWS Identity and Access Management (IAM)." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "contributor": "", + "description": "EMR now supports new DescribeReleaseLabel and ListReleaseLabel APIs. They can provide Amazon EMR release label details. You can programmatically list available releases and applications for a specific Amazon EMR release label." 
+ }, + { + "type": "feature", + "category": "AWS CodeBuild", + "contributor": "", + "description": "AWS CodeBuild now allows you to set the access permissions for build artifacts, project artifacts, and log files that are uploaded to an Amazon S3 bucket that is owned by another account." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Adds support for enabling TLS protocol version and cipher suite headers to be sent to backend targets for Application Load Balancers." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "New ResourceConflictException error code for PutFunctionEventInvokeConfig, UpdateFunctionEventInvokeConfig, and DeleteFunctionEventInvokeConfig operations." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adds the OriginalSnapshotCreateTime field to the DBSnapshot response object. This field timestamps the underlying data of a snapshot and doesn't change when the snapshot is copied." + }, + { + "type": "feature", + "category": "Amazon Personalize", + "contributor": "", + "description": "My AWS Service (placeholder) - Making minProvisionedTPS an optional parameter when creating a campaign. If not provided, it defaults to 1." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "contributor": "", + "description": "Amazon Kendra now provides a data source connector for Amazon WorkDocs. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-workdocs.html" + }, + { + "type": "feature", + "category": "AWS Proton", + "contributor": "", + "description": "Documentation updates for AWS Proton" + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.3.json b/.changes/2.17.3.json new file mode 100644 index 000000000000..ef6b9e77930a --- /dev/null +++ b/.changes/2.17.3.json @@ -0,0 +1,48 @@ +{ + "version": "2.17.3", + "date": "2021-07-22", + "entries": [ + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Add support for more user-friendly CopyObject source parameters" + }, + { + "type": "feature", + "category": "Amazon QLDB", + "contributor": "", + "description": "Amazon QLDB now supports ledgers encrypted with customer managed KMS keys. Changes in CreateLedger, UpdateLedger and DescribeLedger APIs to support the changes." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Adds support for enabling TLS protocol version and cipher suite headers to be sent to backend targets for Application Load Balancers." + }, + { + "type": "feature", + "category": "AWS Glue DataBrew", + "contributor": "", + "description": "This SDK release adds two new features: 1) Output to Native JDBC destinations and 2) Adding configurations to profile jobs" + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "MediaLive now supports passing through style data on WebVTT caption outputs." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "Documentation updates for Amazon S3-control" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release allows customers to assign prefixes to their elastic network interface and to reserve IP blocks in their subnet CIDRs. 
These reserved blocks can be used to assign prefixes to elastic network interfaces or be excluded from auto-assignment." + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.4.json b/.changes/2.17.4.json new file mode 100644 index 000000000000..f468959fd9b1 --- /dev/null +++ b/.changes/2.17.4.json @@ -0,0 +1,24 @@ +{ + "version": "2.17.4", + "date": "2021-07-23", + "entries": [ + { + "type": "feature", + "category": "EC2 Image Builder", + "contributor": "", + "description": "Update to documentation to reapply missing change to SSM uninstall switch default value and improve description." + }, + { + "type": "feature", + "category": "Amazon S3 on Outposts", + "contributor": "", + "description": "Add on-premise access type support for endpoints" + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Added product name, company name, and Region fields for security findings. Added details objects for RDS event subscriptions and AWS ECS services. Added fields to the details for AWS Elasticsearch domains." + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.5.json b/.changes/2.17.5.json new file mode 100644 index 000000000000..23611f72c38b --- /dev/null +++ b/.changes/2.17.5.json @@ -0,0 +1,54 @@ +{ + "version": "2.17.5", + "date": "2021-07-26", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Annotate and document members that are modeled as deprecated" + }, + { + "type": "feature", + "category": "AWS SSO Identity Store", + "contributor": "", + "description": "Documentation updates for SSO API Ref." + }, + { + "type": "feature", + "category": "Amazon Textract", + "contributor": "", + "description": "Adds support for AnalyzeExpense, a new API to extract relevant data such as contact information, items purchased, and vendor name, from almost any invoice or receipt without the need for any templates or configuration." 
+ }, + { + "type": "feature", + "category": "AWS Proton", + "contributor": "", + "description": "Documentation-only update links" + }, + { + "type": "documentation", + "category": "Amazon S3", + "contributor": "", + "description": "Deprecate S3 CopySource parameter for CopyObject & UploadPartCopy" + }, + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "S3 Access Point aliases can be used anywhere you use S3 bucket names to access data in S3" + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "CloudWatch Synthetics now supports visual testing in its canaries." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "SDK update to support creation of Cross-Account Metric Alarms and update API documentation." + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.6.json b/.changes/2.17.6.json new file mode 100644 index 000000000000..f396cd15beb3 --- /dev/null +++ b/.changes/2.17.6.json @@ -0,0 +1,78 @@ +{ + "version": "2.17.6", + "date": "2021-07-27", + "entries": [ + { + "type": "feature", + "category": "AWS Route53 Recovery Readiness", + "contributor": "", + "description": "Amazon Route 53 Application Recovery Controller's readiness check capability continually monitors resource quotas, capacity, and network routing policies to ensure that the recovery environment is scaled and configured to take over when needed." + }, + { + "type": "feature", + "category": "Route53 Recovery Cluster", + "contributor": "", + "description": "Amazon Route 53 Application Recovery Controller's routing control - Routing Control Data Plane APIs help you update the state (On/Off) of the routing controls to reroute traffic across application replicas in a 100% available manner." 
+ }, + { + "type": "feature", + "category": "AWS Route53 Recovery Control Config", + "contributor": "", + "description": "Amazon Route 53 Application Recovery Controller's routing control - Routing Control Configuration APIs help you create and delete clusters, control panels, routing controls and safety rules. State changes (On/Off) of routing controls are not part of configuration APIs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT Analytics", + "contributor": "", + "description": "IoT Analytics now supports creating a dataset resource with IoT SiteWise MultiLayerStorage data stores, enabling customers to query industrial data within the service. This release includes adding JOIN functionality for customers to query multiple data sources in a dataset." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "This release adds support for the RECOVERY_CONTROL health check type to be used in conjunction with Route53 Application Recovery Controller." + }, + { + "type": "feature", + "category": "AWS Shield", + "contributor": "", + "description": "Change name of DDoS Response Team (DRT) to Shield Response Team (SRT)" + }, + { + "type": "feature", + "category": "AWS IoT Wireless", + "contributor": "", + "description": "Add SidewalkManufacturingSn as an identifier to allow Customer to query WirelessDevice, in the response, AmazonId is added in the case that Sidewalk device is return." 
+ }, + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "Add support for ListJob filters" + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Add support to use row-level security with tags when embedding dashboards for users not provisioned in QuickSight" + }, + { + "type": "feature", + "category": "Redshift Data API Service", + "contributor": "", + "description": "Added structures to support new Data API operation BatchExecuteStatement, used to execute multiple SQL statements within a single transaction." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building V2", + "contributor": "", + "description": "Add waiters that automatically poll for resource status for asynchronous operations, such as building a bot" + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.7.json b/.changes/2.17.7.json new file mode 100644 index 000000000000..65d0e56fb9c8 --- /dev/null +++ b/.changes/2.17.7.json @@ -0,0 +1,18 @@ +{ + "version": "2.17.7", + "date": "2021-07-28", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "SDK update to support Importing existing Stacks to new/existing Self Managed StackSet - Stack Import feature." + }, + { + "type": "feature", + "category": "AWS Single Sign-On Admin", + "contributor": "", + "description": "Documentation updates for arn:aws:trebuchet:::service:v1:03a2216d-1cda-4696-9ece-1387cb6f6952" + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.8.json b/.changes/2.17.8.json new file mode 100644 index 000000000000..4db7a5321292 --- /dev/null +++ b/.changes/2.17.8.json @@ -0,0 +1,48 @@ +{ + "version": "2.17.8", + "date": "2021-07-29", + "entries": [ + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "Increase maximum credential duration of role alias to 12 hours." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for G4ad xlarge and 2xlarge instances powered by AMD Radeon Pro V520 GPUs and AMD 2nd Generation EPYC processors" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Correctly handle multi-value headers in Aws4Signer" + }, + { + "type": "feature", + "category": "Amazon Chime", + "contributor": "", + "description": "Adds support for live transcription of meetings with Amazon Transcribe and Amazon Transcribe Medical. The new APIs, StartMeetingTranscription and StopMeetingTranscription, control the generation of user-attributed transcriptions sent to meeting clients via Amazon Chime SDK data messages." + }, + { + "type": "feature", + "category": "AWS Savings Plans", + "contributor": "", + "description": "Documentation update for valid Savings Plans offering ID pattern" + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "Added support for AWS IoT SiteWise Edge. You can now create an AWS IoT SiteWise gateway that runs on AWS IoT Greengrass V2. With the gateway, you can collect local server and equipment data, process the data, and export the selected data from the edge to the AWS Cloud." + } + ] +} \ No newline at end of file diff --git a/.changes/2.17.9.json b/.changes/2.17.9.json new file mode 100644 index 000000000000..1b327e7a2328 --- /dev/null +++ b/.changes/2.17.9.json @@ -0,0 +1,36 @@ +{ + "version": "2.17.9", + "date": "2021-07-30", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "API changes with respect to Lambda steps in model building pipelines. Adds several waiters to async Sagemaker Image APIs. 
Add more instance types to AppInstanceType field" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Secrets Manager", + "contributor": "", + "description": "Add support for KmsKeyIds in the ListSecretVersionIds API response" + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Client Port Preservation ALB Attribute Launch" + }, + { + "type": "feature", + "category": "AWS AppSync", + "contributor": "", + "description": "AWS AppSync now supports a new authorization mode allowing you to define your own authorization logic using an AWS Lambda function." + } + ] +} \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 87e20706d4a4..7552563d99d0 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,12 +1,12 @@ -## Description - - ## Motivation and Context +## Description + + ## Testing diff --git a/.gitignore b/.gitignore index d9d47cbdf646..b3d7ad5f4083 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ # Intellij .idea/ *.iml +*.ipr *.iws # Mac @@ -14,6 +15,10 @@ # Maven target/ +# JEnv +.java-version + +# Shade **/dependency-reduced-pom.xml *.pyc diff --git a/CHANGELOG.md b/CHANGELOG.md index c60518f46a3a..86786732854e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,500 @@ +# __2.17.15__ __2021-08-09__ +## __AWS WAFV2__ + - ### Features + - This release adds APIs to support versioning feature of AWS WAF Managed rule groups + +## __Amazon Rekognition__ + - ### Features + - This release adds support for four new types of segments (opening credits, content segments, slates, and studio logos), improved accuracy for credits and shot detection and new filters to control black frame detection. 
+ +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Documentation updates for AWS Systems Manager. + +# __2.17.14__ __2021-08-06__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Athena__ + - ### Features + - Documentation updates for Athena. + +## __Amazon Chime SDK Identity__ + - ### Features + - The Amazon Chime SDK Identity APIs allow software developers to create and manage unique instances of their messaging applications. + +## __Amazon Chime SDK Messaging__ + - ### Features + - The Amazon Chime SDK Messaging APIs allow software developers to send and receive messages in custom messaging applications. + +## __Amazon Connect Service__ + - ### Features + - This release adds support for agent status and hours of operation. For details, see the Release Notes in the Amazon Connect Administrator Guide. + +## __Amazon Lightsail__ + - ### Features + - This release adds support to track when a bucket access key was last used. + +## __Amazon S3__ + - ### Bugfixes + - Fixed an issue where checksum validation only considered the first 4 bytes of the 16 byte checksum, creating the potential for corrupted downloads to go undetected. + +## __Synthetics__ + - ### Features + - Documentation updates for Visual Monitoring feature and other doc ticket fixes. + +# __2.17.13__ __2021-08-05__ +## __Amazon Lex Model Building V2__ + - ### Features + - Customers can now toggle the active field on prompts and responses. + +## __Auto Scaling__ + - ### Features + - EC2 Auto Scaling adds configuration checks and Launch Template validation to Instance Refresh. + +# __2.17.12__ __2021-08-04__ +## __AWS SDK for Java v2__ + - ### Features + - Add customization.config support for setting default RetryMode + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed an issue where a bean-based marshallers for request and response models would fail for fluent setters that happened to start with "set", like "settings". 
+ +## __AWS Systems Manager Incident Manager__ + - ### Features + - Documentation updates for Incident Manager. + +## __Amazon EventBridge__ + - ### Features + - Documentation updates to add EC2 Image Builder as a target on PutTargets. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds AutomaticRestartTime to the DescribeDBInstances and DescribeDBClusters operations. AutomaticRestartTime indicates the time when a stopped DB instance or DB cluster is restarted automatically. + +## __Amazon Transcribe Service__ + - ### Features + - This release adds support for call analytics (batch) within Amazon Transcribe. + +## __EC2 Image Builder__ + - ### Features + - Updated list actions to include a list of valid filters that can be used in the request. + +# __2.17.11__ __2021-08-03__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added control over the passthrough of XDS captions metadata to outputs. + +## __AWS Glue__ + - ### Features + - Add ConcurrentModificationException to create-table, delete-table, create-database, update-database, delete-database + +## __AWS IoT SiteWise__ + - ### Features + - My AWS Service (placeholder) - This release introduces custom Intervals and offset for tumbling window in metric for AWS IoT SiteWise. + +## __AWS Proton__ + - ### Features + - Docs only add idempotent create apis + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Add IntelliJ .ipr files to gitignore + +## __Amazon Redshift__ + - ### Features + - API support for Redshift Data Sharing feature. + +# __2.17.10__ __2021-08-02__ +## __AWS IoT Greengrass V2__ + - ### Features + - This release adds support for component system resource limits and idempotent Create operations. You can now specify the maximum amount of CPU and memory resources that each component can use. 
+ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Don't attempt to use empty session tokens + +## __AWS Savings Plans__ + - ### Bugfixes + - Setting default content type to 'application/json' since the service does not accept 'x-amz-json-' + +## __AWS Systems Manager Incident Manager Contacts__ + - ### Features + - Added new attribute in AcceptCode API. AcceptCodeValidation takes in two values - ENFORCE, IGNORE. ENFORCE forces validation of accept code and IGNORE ignores it which is also the default behavior; Corrected TagKeyList length from 200 to 50 + +# __2.17.9__ __2021-07-30__ +## __AWS AppSync__ + - ### Features + - AWS AppSync now supports a new authorization mode allowing you to define your own authorization logic using an AWS Lambda function. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Secrets Manager__ + - ### Features + - Add support for KmsKeyIds in the ListSecretVersionIds API response + +## __Amazon SageMaker Service__ + - ### Features + - API changes with respect to Lambda steps in model building pipelines. Adds several waiters to async Sagemaker Image APIs. Add more instance types to AppInstanceType field + +## __Elastic Load Balancing__ + - ### Features + - Client Port Preservation ALB Attribute Launch + +# __2.17.8__ __2021-07-29__ +## __AWS IoT__ + - ### Features + - Increase maximum credential duration of role alias to 12 hours. + +## __AWS IoT SiteWise__ + - ### Features + - Added support for AWS IoT SiteWise Edge. You can now create an AWS IoT SiteWise gateway that runs on AWS IoT Greengrass V2. With the gateway, you can collect local server and equipment data, process the data, and export the selected data from the edge to the AWS Cloud. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ + - ### Bugfixes + - Correctly handle multi-value headers in Aws4Signer + +## __AWS Savings Plans__ + - ### Features + - Documentation update for valid Savings Plans offering ID pattern + +## __Amazon Chime__ + - ### Features + - Adds support for live transcription of meetings with Amazon Transcribe and Amazon Transcribe Medical. The new APIs, StartMeetingTranscription and StopMeetingTranscription, control the generation of user-attributed transcriptions sent to meeting clients via Amazon Chime SDK data messages. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for G4ad xlarge and 2xlarge instances powered by AMD Radeon Pro V520 GPUs and AMD 2nd Generation EPYC processors + +# __2.17.7__ __2021-07-28__ +## __AWS CloudFormation__ + - ### Features + - SDK update to support Importing existing Stacks to new/existing Self Managed StackSet - Stack Import feature. + +## __AWS Single Sign-On Admin__ + - ### Features + - Documentation updates for arn:aws:trebuchet:::service:v1:03a2216d-1cda-4696-9ece-1387cb6f6952 + +# __2.17.6__ __2021-07-27__ +## __AWS Batch__ + - ### Features + - Add support for ListJob filters + +## __AWS IoT Analytics__ + - ### Features + - IoT Analytics now supports creating a dataset resource with IoT SiteWise MultiLayerStorage data stores, enabling customers to query industrial data within the service. This release includes adding JOIN functionality for customers to query multiple data sources in a dataset. + +## __AWS IoT Wireless__ + - ### Features + - Add SidewalkManufacturingSn as an identifier to allow Customer to query WirelessDevice, in the response, AmazonId is added in the case that Sidewalk device is return. + +## __AWS Route53 Recovery Control Config__ + - ### Features + - Amazon Route 53 Application Recovery Controller's routing control - Routing Control Configuration APIs help you create and delete clusters, control panels, routing controls and safety rules. 
State changes (On/Off) of routing controls are not part of configuration APIs. + +## __AWS Route53 Recovery Readiness__ + - ### Features + - Amazon Route 53 Application Recovery Controller's readiness check capability continually monitors resource quotas, capacity, and network routing policies to ensure that the recovery environment is scaled and configured to take over when needed. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Shield__ + - ### Features + - Change name of DDoS Response Team (DRT) to Shield Response Team (SRT) + +## __Amazon Lex Model Building V2__ + - ### Features + - Add waiters that automatically poll for resource status for asynchronous operations, such as building a bot + +## __Amazon QuickSight__ + - ### Features + - Add support to use row-level security with tags when embedding dashboards for users not provisioned in QuickSight + +## __Amazon Route 53__ + - ### Features + - This release adds support for the RECOVERY_CONTROL health check type to be used in conjunction with Route53 Application Recovery Controller. + +## __Redshift Data API Service__ + - ### Features + - Added structures to support new Data API operation BatchExecuteStatement, used to execute multiple SQL statements within a single transaction. + +## __Route53 Recovery Cluster__ + - ### Features + - Amazon Route 53 Application Recovery Controller's routing control - Routing Control Data Plane APIs help you update the state (On/Off) of the routing controls to reroute traffic across application replicas in a 100% available manner. 
+ +# __2.17.5__ __2021-07-26__ +## __AWS Proton__ + - ### Features + - Documentation-only update links + +## __AWS S3 Control__ + - ### Features + - S3 Access Point aliases can be used anywhere you use S3 bucket names to access data in S3 + +## __AWS SDK for Java v2__ + - ### Features + - Annotate and document members that are modeled as deprecated + +## __AWS SSO Identity Store__ + - ### Features + - Documentation updates for SSO API Ref. + +## __Amazon CloudWatch__ + - ### Features + - SDK update to support creation of Cross-Account Metric Alarms and update API documentation. + +## __Amazon S3__ + - ### Documentations + - Deprecate S3 CopySource parameter for CopyObject & UploadPartCopy + +## __Amazon Textract__ + - ### Features + - Adds support for AnalyzeExpense, a new API to extract relevant data such as contact information, items purchased, and vendor name, from almost any invoice or receipt without the need for any templates or configuration. + +## __Synthetics__ + - ### Features + - CloudWatch Synthetics now supports visual testing in its canaries. + +# __2.17.4__ __2021-07-23__ +## __AWS SecurityHub__ + - ### Features + - Added product name, company name, and Region fields for security findings. Added details objects for RDS event subscriptions and AWS ECS services. Added fields to the details for AWS Elasticsearch domains. + +## __Amazon S3 on Outposts__ + - ### Features + - Add on-premise access type support for endpoints + +## __EC2 Image Builder__ + - ### Features + - Update to documentation to reapply missing change to SSM uninstall switch default value and improve description. + +# __2.17.3__ __2021-07-22__ +## __AWS Elemental MediaLive__ + - ### Features + - MediaLive now supports passing through style data on WebVTT caption outputs. 
+ +## __AWS Glue DataBrew__ + - ### Features + - This SDK release adds two new features: 1) Output to Native JDBC destinations and 2) Adding configurations to profile jobs + +## __AWS S3 Control__ + - ### Features + - Documentation updates for Amazon S3-control + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release allows customers to assign prefixes to their elastic network interface and to reserve IP blocks in their subnet CIDRs. These reserved blocks can be used to assign prefixes to elastic network interfaces or be excluded from auto-assignment. + +## __Amazon QLDB__ + - ### Features + - Amazon QLDB now supports ledgers encrypted with customer managed KMS keys. Changes in CreateLedger, UpdateLedger and DescribeLedger APIs to support the changes. + +## __Amazon S3__ + - ### Features + - Add support for more user-friendly CopyObject source parameters + +## __Elastic Load Balancing__ + - ### Features + - Adds support for enabling TLS protocol version and cipher suite headers to be sent to backend targets for Application Load Balancers. + +# __2.17.2__ __2021-07-21__ +## __AWS CodeBuild__ + - ### Features + - AWS CodeBuild now allows you to set the access permissions for build artifacts, project artifacts, and log files that are uploaded to an Amazon S3 bucket that is owned by another account. + +## __AWS Identity and Access Management__ + - ### Features + - Documentation updates for AWS Identity and Access Management (IAM). + +## __AWS Lambda__ + - ### Features + - New ResourceConflictException error code for PutFunctionEventInvokeConfig, UpdateFunctionEventInvokeConfig, and DeleteFunctionEventInvokeConfig operations. + +## __AWS Proton__ + - ### Features + - Documentation updates for AWS Proton + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now provides a data source connector for Amazon WorkDocs. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-workdocs.html + +## __Amazon Elastic MapReduce__ + - ### Features + - EMR now supports new DescribeReleaseLabel and ListReleaseLabel APIs. They can provide Amazon EMR release label details. You can programmatically list available releases and applications for a specific Amazon EMR release label. + +## __Amazon Personalize__ + - ### Features + - My AWS Service (placeholder) - Making minProvisionedTPS an optional parameter when creating a campaign. If not provided, it defaults to 1. + +## __Amazon Relational Database Service__ + - ### Features + - Adds the OriginalSnapshotCreateTime field to the DBSnapshot response object. This field timestamps the underlying data of a snapshot and doesn't change when the snapshot is copied. + +## __Elastic Load Balancing__ + - ### Features + - Adds support for enabling TLS protocol version and cipher suite headers to be sent to backend targets for Application Load Balancers. + +# __2.17.1__ __2021-07-20__ +## __AWS Compute Optimizer__ + - ### Features + - Documentation updates for Compute Optimizer + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added idempotency to the CreateVolume API using the ClientToken request parameter + +# __2.17.0__ __2021-07-19__ +## __AWS Direct Connect__ + - ### Features + - Documentation updates for directconnect + +## __AWS Health APIs and Notifications__ + - ### Features + - In the Health API, the maximum number of entities for the EventFilter and EntityFilter data types has changed from 100 to 99. This change is related to an internal optimization of the AWS Health service. + +## __AWS RoboMaker__ + - ### Features + - This release allows customers to create a new version of WorldTemplates with support for Doors. 
+ +## __AWS SDK for Java v2__ + - ### Features + - Moved Jackson from an external SDK dependency to an internal dependency: https://aws.amazon.com/blogs/developer/the-aws-sdk-for-java-2-17-removes-its-external-dependency-on-jackson/ + +## __Amazon EMR Containers__ + - ### Features + - Updated DescribeManagedEndpoint and ListManagedEndpoints to return failureReason and stateDetails in API response. + +## __Amazon Location Service__ + - ### Features + - Add five new API operations: UpdateGeofenceCollection, UpdateMap, UpdatePlaceIndex, UpdateRouteCalculator, UpdateTracker. + +## __EC2 Image Builder__ + - ### Features + - Documentation updates for reversal of default value for additional instance configuration SSM switch, plus improved descriptions for semantic versioning. + +# __2.16.104__ __2021-07-16__ +## __AWS Audit Manager__ + - ### Features + - This release relaxes the S3 URL character restrictions in AWS Audit Manager. Regex patterns have been updated for the following attributes: s3RelativePath, destination, and s3ResourcePath. 'AWS' terms have also been replaced with entities to align with China Rebrand documentation efforts. + +## __AWS SDK for Java v2__ + - ### Features + - This changes implements support for the `AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE` environment variable and + `ec2_metadata_service_endpoint` profile file property. + + When no endpoint override is set using `AWS_EC2_METADATA_SERVICE_ENDPOINT`, this configuration controls which of the default + IMDS endpoints the client will use. Valid values are `IPv4` or `IPv6` + - Updated service endpoint metadata. 
+ +## __Amazon AppIntegrations Service__ + - ### Features + - Documentation update for AppIntegrations Service + +## __Amazon Chime__ + - ### Features + - This SDK release adds Account Status as one of the attributes in Account API response + +# __2.16.103__ __2021-07-15__ +## __AWS IoT SiteWise__ + - ### Features + - Update the default endpoint for the APIs used to manage asset models, assets, gateways, tags, and account configurations. If you have firewalls with strict egress rules, configure the rules to grant you access to api.iotsitewise.[region].amazonaws.com or api.iotsitewise.[cn-region].amazonaws.com.cn. + +## __AWS SDK for Java v2__ + - ### Features + - Add support for marshalling lists of strings in HTTP headers + +## __Amazon Cognito Identity Provider__ + - ### Features + - Documentation updates for cognito-idp + +## __Amazon EC2 Container Service__ + - ### Features + - Documentation updates for support of awsvpc mode on Windows. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This feature enables customers to specify weekly recurring time window(s) for scheduled events that reboot, stop or terminate EC2 instances. + +## __Amazon Lex Model Building Service__ + - ### Features + - Lex now supports the en-IN locale + +# __2.16.102__ __2021-07-14__ +## __AWS Certificate Manager__ + - ### Features + - Added support for RSA 3072 SSL certificate import + +## __AWS Database Migration Service__ + - ### Features + - Release of feature needed for ECA-Endpoint settings. This allows customer to delete a field in endpoint settings by using --exact-settings flag in modify-endpoint api. This also displays default values for certain required fields of endpoint settings in describe-endpoint-settings api. 
+ +## __AWS Glue__ + - ### Features + - Add support for Event Driven Workflows + +## __AWS SDK for Java v2__ + - ### Documentations + - Update pull request template to place motivation before description + - Contributed by: [@Bennett-Lynch](https://github.com/Bennett-Lynch) + +## __AWS Well-Architected Tool__ + - ### Features + - This update provides support for Well-Architected API users to mark answer choices as not applicable. + +## __Amazon HealthLake__ + - ### Features + - General availability for Amazon HealthLake. StartFHIRImportJob and StartFHIRExportJob APIs now require AWS KMS parameter. For more information, see the Amazon HealthLake Documentation https://docs.aws.amazon.com/healthlake/index.html. + +## __Amazon Lightsail__ + - ### Features + - This release adds support for the Amazon Lightsail object storage service, which allows you to create buckets and store objects. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@Bennett-Lynch](https://github.com/Bennett-Lynch) +# __2.16.101__ __2021-07-13__ +## __AWS Direct Connect__ + - ### Features + - This release adds a new filed named awsLogicalDeviceId that it displays the AWS Direct Connect endpoint which terminates a physical connection's BGP Sessions. + +## __AWS Price List Service__ + - ### Features + - Documentation updates for api.pricing + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DevOps Guru__ + - ### Features + - Add paginator for GetCostEstimation + +## __Amazon Lex Model Building Service__ + - ### Features + - Customers can now migrate bots built with Lex V1 APIs to V2 APIs. This release adds APIs to initiate and manage the migration of a bot. + +## __Amazon Redshift__ + - ### Features + - Release new APIs to support new Redshift feature - Authentication Profile + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Changes to OpsCenter APIs to support a new feature, operational insights. 
+ +## __AmplifyBackend__ + - ### Features + - Added Sign in with Apple OAuth provider. + # __2.16.100__ __2021-07-12__ ## __AWS SDK for Java v2__ - ### Features diff --git a/NOTICE.txt b/NOTICE.txt index aa3a5ce58762..b3b042a1db2a 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12,6 +12,8 @@ This software includes third party software subject to the following copyrights: - PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. - Apache Commons Lang - https://github.com/apache/commons-lang - Netty Reactive Streams - https://github.com/playframework/netty-reactive-streams +- Jackson-core - https://github.com/FasterXML/jackson-core +- Jackson-dataformat-cbor - https://github.com/FasterXML/jackson-dataformats-binary The licenses for these third party components are included in LICENSE.txt diff --git a/README.md b/README.md index a7223cee9c07..0e7047f019c9 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.16.100 + 2.17.15 pom import @@ -86,12 +86,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.16.100 + 2.17.15 software.amazon.awssdk s3 - 2.16.100 + 2.17.15 ``` @@ -103,7 +103,7 @@ You can import the whole SDK into your project (includes *ALL* services). 
Please software.amazon.awssdk aws-sdk-java - 2.16.100 + 2.17.15 ``` diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 3d2ee5bc18c2..077f087db08b 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 @@ -171,4 +171,4 @@ - \ No newline at end of file + diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index 9675673ff410..294ce74c7497 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 archetype-lambda @@ -169,4 +169,4 @@ - \ No newline at end of file + diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index f2729fa713a8..943b2d2a384a 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 @@ -59,4 +59,4 @@ - \ No newline at end of file + diff --git a/archetypes/pom.xml b/archetypes/pom.xml index 7c02f143263d..e9fe2b1ec345 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 archetypes @@ -34,4 +34,4 @@ Maven Archetypes for applications using Java SDK 2.x - \ No newline at end of file + diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 094f0adc3092..45f4a2ca73bf 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../pom.xml aws-sdk-java @@ -1363,6 +1363,31 @@ Amazon AutoScaling, etc). 
proton ${awsjavasdk.version} + + software.amazon.awssdk + route53recoveryreadiness + ${awsjavasdk.version} + + + software.amazon.awssdk + route53recoverycontrolconfig + ${awsjavasdk.version} + + + software.amazon.awssdk + route53recoverycluster + ${awsjavasdk.version} + + + software.amazon.awssdk + chimesdkmessaging + ${awsjavasdk.version} + + + software.amazon.awssdk + chimesdkidentity + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index 9017ddb3ce1a..d83eb6681b37 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 @@ -84,11 +84,6 @@ jackson-datatype-jsr310 ${jackson.version} - - software.amazon.ion - ion-java - ${ion.java.version} - org.apache.httpcomponents httpclient diff --git a/bom/pom.xml b/bom/pom.xml index 67f8a7bbd0da..87c51c9818be 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../pom.xml bom @@ -64,27 +64,37 @@ software.amazon.awssdk - auth + json-utils ${awsjavasdk.version} software.amazon.awssdk - aws-core + third-party-jackson-core ${awsjavasdk.version} software.amazon.awssdk - profiles + third-party-jackson-dataformat-cbor ${awsjavasdk.version} software.amazon.awssdk - aws-cbor-protocol + auth + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-core ${awsjavasdk.version} software.amazon.awssdk - aws-ion-protocol + profiles + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-cbor-protocol ${awsjavasdk.version} @@ -1493,6 +1503,31 @@ proton ${awsjavasdk.version} + + software.amazon.awssdk + route53recoveryreadiness + ${awsjavasdk.version} + + + software.amazon.awssdk + route53recoverycontrolconfig + ${awsjavasdk.version} + + + software.amazon.awssdk + route53recoverycluster + ${awsjavasdk.version} + + + software.amazon.awssdk + chimesdkmessaging + 
${awsjavasdk.version} + + + software.amazon.awssdk + chimesdkidentity + ${awsjavasdk.version} + diff --git a/buildspecs/release-javadoc.yml b/buildspecs/release-javadoc.yml index bbacef6911ae..9887d3594770 100644 --- a/buildspecs/release-javadoc.yml +++ b/buildspecs/release-javadoc.yml @@ -15,7 +15,7 @@ phases: build: commands: - mvn install -P quick -T1C - - mvn install javadoc:aggregate -B -Ppublic-javadoc -Dcheckstyle.skip -Dspotbugs.skip -DskipTests -Ddoclint=none -pl '!:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:test-utils,!:http-client-tests,!:tests-coverage-reporting' + - mvn clean install javadoc:aggregate -B -Ppublic-javadoc -Dcheckstyle.skip -Dspotbugs.skip -DskipTests -Ddoclint=none -pl '!:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:test-utils,!:http-client-tests,!:tests-coverage-reporting' - RELEASE_VERSION=`mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec` - - aws s3 sync target/site/apidocs/ $DOC_PATH/$RELEASE_VERSION/ diff --git a/buildspecs/release-to-maven.yml b/buildspecs/release-to-maven.yml index 7d0579c50ac2..e734173cbaa3 100644 --- a/buildspecs/release-to-maven.yml +++ b/buildspecs/release-to-maven.yml @@ -26,7 +26,7 @@ phases: if ! 
curl -f --head $SONATYPE_URL; then mkdir -p $CREDENTIALS aws s3 cp s3://aws-java-sdk-release-credentials/ $CREDENTIALS/ --recursive - mvn clean deploy -B -s $SETTINGS_XML -Dgpg.homedir=$GPG_HOME -Ppublishing -DperformRelease -Dspotbugs.skip -DskipTests -Dcheckstyle.skip -Djapicmp.skip -Ddoclint=none -pl !:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:s3-benchmarks,!:module-path-tests,!:tests-coverage-reporting,!:stability-tests,!:sdk-native-image-test -DautoReleaseAfterClose=true -DstagingProgressTimeoutMinutes=30 + mvn clean deploy -B -s $SETTINGS_XML -Dgpg.homedir=$GPG_HOME -Ppublishing -DperformRelease -Dspotbugs.skip -DskipTests -Dcheckstyle.skip -Djapicmp.skip -Ddoclint=none -pl !:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:tests-coverage-reporting,!:stability-tests,!:sdk-native-image-test,!:auth-sts-testing,!:s3-benchmarks -DautoReleaseAfterClose=true -DstagingProgressTimeoutMinutes=30 else echo "This version was already released." 
fi diff --git a/bundle/pom.xml b/bundle/pom.xml index c38813b02760..de1161521f0f 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT bundle jar @@ -70,31 +70,20 @@ com.fasterxml.jackson.jr:* io.netty:* com.typesafe.netty:* - com.fasterxml.jackson.core:* - com.fasterxml.jackson.dataformat:jackson-dataformat-cbor org.apache.httpcomponents:* org.reactivestreams:* org.slf4j:* commons-codec:commons-codec - software.amazon.ion:ion-java software.amazon.awssdk:* software.amazon:* commons-logging:* - - com.fasterxml.jackson - software.amazon.awssdk.thirdparty.com.fasterxml.jackson - org.apache software.amazon.awssdk.thirdparty.org.apache - - software.amazon.ion - software.amazon.awssdk.thirdparty.ion - io.netty software.amazon.awssdk.thirdparty.io.netty diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 299a4a556867..e0a7a0f645ff 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index 4e1ba4979ef2..a9735da5c749 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index c9fc7e69a02d..12a11b665ca8 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index 6232ef43b45a..7a1e1bc0bf53 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 
2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codegen AWS Java SDK :: Code Generator @@ -102,11 +102,6 @@ aws-cbor-protocol ${awsjavasdk.version} - - software.amazon.awssdk - aws-ion-protocol - ${awsjavasdk.version} - software.amazon.awssdk aws-query-protocol @@ -143,6 +138,14 @@ com.fasterxml.jackson.jr jackson-jr-stree + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-databind + org.slf4j slf4j-api diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java index f5da83363eac..0eb74f109466 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java @@ -79,6 +79,7 @@ protected final ShapeModel generateShapeModel(String javaClassName, String shape // contains the list of c2j member names that are required for this shape. shapeModel.setRequired(shape.getRequired()); shapeModel.setDeprecated(shape.isDeprecated()); + shapeModel.setDeprecatedMessage(shape.getDeprecatedMessage()); shapeModel.setWrapper(shape.isWrapper()); shapeModel.withIsEventStream(shape.isEventstream()); shapeModel.withIsEvent(shape.isEvent()); @@ -172,6 +173,7 @@ private MemberModel generateMemberModel(String c2jMemberName, Member c2jMemberDe .withJsonValue(c2jMemberDefinition.getJsonvalue()); memberModel.setDocumentation(c2jMemberDefinition.getDocumentation()); memberModel.setDeprecated(c2jMemberDefinition.isDeprecated()); + memberModel.setDeprecatedMessage(c2jMemberDefinition.getDeprecatedMessage()); memberModel.setSensitive(isSensitiveShapeOrContainer(c2jMemberDefinition, allC2jShapes)); memberModel .withFluentGetterMethodName(namingStrategy.getFluentGetterMethodName(c2jMemberName, parentShape, shape)) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ShapeModifiersProcessor.java 
b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ShapeModifiersProcessor.java index 4deb239b5935..9e371bc6b29b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ShapeModifiersProcessor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ShapeModifiersProcessor.java @@ -208,7 +208,13 @@ private void preprocessModifyShapeMembers(ServiceModel serviceModel, Shape shape private void doModifyShapeMembers(ServiceModel serviceModel, Shape shape, String memberToModify, ModifyModelShapeModifier modifyModel) { - + if (modifyModel.isDeprecated()) { + Member member = shape.getMembers().get(memberToModify); + member.setDeprecated(true); + if (modifyModel.getDeprecatedMessage() != null) { + member.setDeprecatedMessage(modifyModel.getDeprecatedMessage()); + } + } // Currently only supports emitPropertyName which is to rename the member if (modifyModel.getEmitPropertyName() != null) { Member member = shape.getMembers().remove(memberToModify); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index 6707d63d68b9..518056266be0 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -19,9 +19,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.traits.PayloadTrait; import software.amazon.awssdk.utils.AttributeMap; +/** + * {@code service-2.json} models can be manually modified via defining properties in an associated {@code customization.config} + * file. 
This class defines the Java bean representation that will be used to parse the JSON customization file. The bean can + * then be later queried in the misc. codegen steps. + */ public class CustomizationConfig { /** @@ -188,6 +194,8 @@ public class CustomizationConfig { private UnderscoresInNameBehavior underscoresInNameBehavior; private String userAgent; + + private RetryMode defaultRetryMode; private CustomizationConfig() { } @@ -485,4 +493,12 @@ public CustomizationConfig withUserAgent(String userAgent) { this.userAgent = userAgent; return this; } + + public RetryMode getDefaultRetryMode() { + return defaultRetryMode; + } + + public void setDefaultRetryMode(RetryMode defaultRetryMode) { + this.defaultRetryMode = defaultRetryMode; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/ModifyModelShapeModifier.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/ModifyModelShapeModifier.java index b2f2465ea567..dffaf7d0d40b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/ModifyModelShapeModifier.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/ModifyModelShapeModifier.java @@ -17,6 +17,16 @@ public class ModifyModelShapeModifier { + /** + * Indicates whether a member should be annotated as {@link Deprecated}. + */ + private boolean deprecated; + + /** + * The Javadoc message that will be included with the {@link Deprecated} annotation. 
+ */ + private String deprecatedMessage; + /** * Indicates whether a renamed member should create getters and setters under the existing name */ @@ -49,6 +59,22 @@ public class ModifyModelShapeModifier { private String unmarshallLocationName; + public String getDeprecatedMessage() { + return deprecatedMessage; + } + + public void setDeprecatedMessage(String deprecatedMessage) { + this.deprecatedMessage = deprecatedMessage; + } + + public boolean isDeprecated() { + return deprecated; + } + + public void setDeprecated(boolean deprecated) { + this.deprecated = deprecated; + } + public boolean isExistingNameDeprecated() { return existingNameDeprecated; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java index f529214d2ecb..199f5c827d3a 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java @@ -55,6 +55,8 @@ public class MemberModel extends DocumentationModel { private ParameterHttpMapping http; private boolean deprecated; + + private String deprecatedMessage; private ListModel listModel; @@ -301,6 +303,14 @@ public void setDeprecated(boolean deprecated) { this.deprecated = deprecated; } + public String getDeprecatedMessage() { + return deprecatedMessage; + } + + public void setDeprecatedMessage(String deprecatedMessage) { + this.deprecatedMessage = deprecatedMessage; + } + public boolean isEventPayload() { return eventPayload; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Metadata.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Metadata.java index eb3a83d9fb20..14fc9a3a6cd3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Metadata.java +++ 
b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Metadata.java @@ -499,17 +499,12 @@ public Metadata withJsonVersion(String jsonVersion) { return this; } - public boolean isIonProtocol() { - return protocol == Protocol.ION; - } - public boolean isCborProtocol() { return protocol == Protocol.CBOR; } public boolean isJsonProtocol() { return protocol == Protocol.CBOR || - protocol == Protocol.ION || protocol == Protocol.AWS_JSON || protocol == Protocol.REST_JSON; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Protocol.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Protocol.java index c24898551033..719d088c77e2 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Protocol.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Protocol.java @@ -24,8 +24,7 @@ public enum Protocol { REST_JSON("rest-json"), CBOR("cbor"), QUERY("query"), - REST_XML("rest-xml"), - ION("ion"); + REST_XML("rest-xml"); private String protocol; diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java index 551eaa308e7b..26b797637805 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java @@ -37,6 +37,7 @@ public class ShapeModel extends DocumentationModel implements HasDeprecation { private String shapeName; // the local variable name inside marshaller/unmarshaller implementation private boolean deprecated; + private String deprecatedMessage; private String type; private List required; private boolean hasPayloadMember; @@ -94,6 +95,14 @@ public void setDeprecated(boolean deprecated) { this.deprecated = deprecated; } + public String getDeprecatedMessage() { + return 
deprecatedMessage; + } + + public void setDeprecatedMessage(String deprecatedMessage) { + this.deprecatedMessage = deprecatedMessage; + } + public String getC2jName() { return c2jName; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Member.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Member.java index fc106d3df83b..685d761acbee 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Member.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Member.java @@ -41,6 +41,8 @@ public class Member { private boolean deprecated; + private String deprecatedMessage; + private boolean jsonvalue; private String timestampFormat; @@ -153,6 +155,14 @@ public void setDeprecated(boolean deprecated) { this.deprecated = deprecated; } + public String getDeprecatedMessage() { + return deprecatedMessage; + } + + public void setDeprecatedMessage(String deprecatedMessage) { + this.deprecatedMessage = deprecatedMessage; + } + public boolean getJsonvalue() { return jsonvalue; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java index 5a93a8b22928..fbdb522e6145 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java @@ -59,6 +59,8 @@ public class Shape { private boolean fault; private boolean deprecated; + + private String deprecatedMessage; private boolean eventstream; @@ -260,6 +262,14 @@ public void setDeprecated(boolean deprecated) { this.deprecated = deprecated; } + public String getDeprecatedMessage() { + return deprecatedMessage; + } + + public void setDeprecatedMessage(String deprecatedMessage) { + this.deprecatedMessage = deprecatedMessage; + } + public boolean isEventstream() { return eventstream; } diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java index 4d141cd594ff..69b6e4500c45 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java @@ -29,6 +29,7 @@ import com.squareup.javapoet.TypeVariableName; import java.util.Collections; import java.util.List; +import java.util.Optional; import javax.lang.model.element.Modifier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.signer.Aws4Signer; @@ -45,6 +46,7 @@ import software.amazon.awssdk.core.endpointdiscovery.providers.DefaultEndpointDiscoveryProviderChain; import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.SdkHttpConfigurationOption; @@ -92,9 +94,7 @@ public TypeSpec poetSpec() { builder.addMethod(serviceNameMethod()); builder.addMethod(mergeServiceDefaultsMethod()); - if (model.getCustomizationConfig().getUserAgent() != null) { - builder.addMethod(mergeInternalDefaultsMethod()); - } + mergeInternalDefaultsMethod().ifPresent(builder::addMethod); builder.addMethod(finalizeServiceConfigurationMethod()); builder.addMethod(defaultSignerMethod()); @@ -175,19 +175,31 @@ private MethodSpec mergeServiceDefaultsMethod() { return builder.build(); } - private MethodSpec mergeInternalDefaultsMethod() { + private Optional mergeInternalDefaultsMethod() { String userAgent = model.getCustomizationConfig().getUserAgent(); + RetryMode defaultRetryMode = model.getCustomizationConfig().getDefaultRetryMode(); + + // If none 
of the options are customized, then we do not need to bother overriding the method + if (userAgent == null && defaultRetryMode == null) { + return Optional.empty(); + } MethodSpec.Builder builder = MethodSpec.methodBuilder("mergeInternalDefaults") .addAnnotation(Override.class) .addModifiers(PROTECTED, FINAL) .returns(SdkClientConfiguration.class) .addParameter(SdkClientConfiguration.class, "config") - .addCode("return config.merge(c -> c.option($T.INTERNAL_USER_AGENT, $S)\n", - SdkClientOption.class, userAgent); - - builder.addCode(");"); - return builder.build(); + .addCode("return config.merge(c -> {\n"); + if (userAgent != null) { + builder.addCode("c.option($T.INTERNAL_USER_AGENT, $S);\n", + SdkClientOption.class, userAgent); + } + if (defaultRetryMode != null) { + builder.addCode("c.option($T.DEFAULT_RETRY_MODE, $T.$L);\n", + SdkClientOption.class, RetryMode.class, defaultRetryMode.name()); + } + builder.addCode("});\n"); + return Optional.of(builder.build()); } private MethodSpec finalizeServiceConfigurationMethod() { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java index 7fe594733993..e8c300619e5d 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java @@ -339,7 +339,6 @@ static ProtocolSpec getProtocolSpecs(PoetExtensions poetExtensions, Intermediate case AWS_JSON: case REST_JSON: case CBOR: - case ION: return new JsonProtocolSpec(poetExtensions, model); default: throw new RuntimeException("Unknown protocol: " + protocol.name()); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index efb03a8e7eb9..bfb151877f13 100644 --- 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -51,7 +51,6 @@ import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.protocols.cbor.AwsCborProtocolFactory; -import software.amazon.awssdk.protocols.ion.AwsIonProtocolFactory; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory; @@ -119,8 +118,6 @@ private CodeBlock customErrorCodeFieldName() { private Class protocolFactoryClass() { if (model.getMetadata().isCborProtocol()) { return AwsCborProtocolFactory.class; - } else if (model.getMetadata().isIonProtocol()) { - return AwsIonProtocolFactory.class; } else { return AwsJsonProtocolFactory.class; } @@ -377,7 +374,6 @@ public Optional createErrorResponseHandler() { private String protocolEnumName(software.amazon.awssdk.codegen.model.intermediate.Protocol protocol) { switch (protocol) { case CBOR: - case ION: case AWS_JSON: return AWS_JSON.name(); default: diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AbstractMemberSetters.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AbstractMemberSetters.java index f34e5a310f87..11bc144b4dc2 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AbstractMemberSetters.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AbstractMemberSetters.java @@ -22,6 +22,7 @@ import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.ParameterSpec; import com.squareup.javapoet.TypeName; +import java.beans.Transient; import java.util.Optional; import java.util.stream.Collectors; import javax.lang.model.element.Modifier; @@ -84,6 +85,7 @@ protected 
MethodSpec.Builder fluentSetterBuilder(String methodName, ParameterSpe return MethodSpec.methodBuilder(methodName) .addParameter(setterParam) .addAnnotation(Override.class) + .addAnnotation(Transient.class) .returns(returnType) .addModifiers(Modifier.PUBLIC, Modifier.FINAL); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java index b5289a33ffc5..4860dd4b06d8 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java @@ -18,6 +18,7 @@ import static javax.lang.model.element.Modifier.FINAL; import static javax.lang.model.element.Modifier.PRIVATE; import static javax.lang.model.element.Modifier.PUBLIC; +import static software.amazon.awssdk.codegen.poet.model.DeprecationUtils.checkDeprecated; import com.squareup.javapoet.ClassName; import com.squareup.javapoet.CodeBlock; @@ -509,7 +510,7 @@ private Stream memberGetters(MemberModel member) { result.add(memberGetter(member)); - return result.stream(); + return checkDeprecated(member, result).stream(); } private boolean shouldGenerateDeprecatedNameGetter(MemberModel member) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/DeprecationUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/DeprecationUtils.java new file mode 100644 index 000000000000..aee1c7177011 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/DeprecationUtils.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.model; + +import static java.util.stream.Collectors.toList; +import static software.amazon.awssdk.codegen.internal.Constant.LF; + +import com.squareup.javapoet.AnnotationSpec; +import com.squareup.javapoet.MethodSpec; +import java.util.List; +import software.amazon.awssdk.codegen.model.intermediate.MemberModel; +import software.amazon.awssdk.utils.StringUtils; + +public final class DeprecationUtils { + + private static final AnnotationSpec DEPRECATED = AnnotationSpec.builder(Deprecated.class).build(); + + private DeprecationUtils() { + } + + /** + * If a given member is modeled as deprecated, add the {@link Deprecated} annotation to the method and, if the method + * already has existing Javadoc, append a section with the {@code @deprecated} tag. 
+ */ + public static MethodSpec checkDeprecated(MemberModel member, MethodSpec method) { + if (!member.isDeprecated() || method.annotations.contains(DEPRECATED)) { + return method; + } + MethodSpec.Builder builder = method.toBuilder().addAnnotation(DEPRECATED); + if (!method.javadoc.isEmpty()) { + builder.addJavadoc(LF + "@deprecated"); + if (StringUtils.isNotBlank(member.getDeprecatedMessage())) { + builder.addJavadoc(" $L", member.getDeprecatedMessage()); + } + } + return builder.build(); + } + + public static List checkDeprecated(MemberModel member, List methods) { + return methods.stream().map(methodSpec -> checkDeprecated(member, methodSpec)).collect(toList()); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java index 31cdc4401d2e..80d487668767 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.codegen.poet.model; import static java.util.stream.Collectors.toList; +import static software.amazon.awssdk.codegen.poet.model.DeprecationUtils.checkDeprecated; import com.squareup.javapoet.ClassName; import com.squareup.javapoet.FieldSpec; @@ -76,8 +77,10 @@ public TypeSpec builderInterface() { shapeModel.getNonStreamingMembers() .forEach(m -> { - builder.addMethods(accessorsFactory.fluentSetterDeclarations(m, builderInterfaceName())); - builder.addMethods(accessorsFactory.convenienceSetterDeclarations(m, builderInterfaceName())); + builder.addMethods( + checkDeprecated(m, accessorsFactory.fluentSetterDeclarations(m, builderInterfaceName()))); + builder.addMethods( + checkDeprecated(m, accessorsFactory.convenienceSetterDeclarations(m, builderInterfaceName()))); }); if (isException()) { @@ -212,13 +215,10 @@ private List accessors() { List 
accessors = new ArrayList<>(); shapeModel.getNonStreamingMembers() .forEach(m -> { - accessors.add(accessorsFactory.beanStyleGetter(m)); - - List fluentSetters = accessorsFactory.fluentSetters(m, builderInterfaceName()); - accessors.addAll(fluentSetters); - - accessors.addAll(accessorsFactory.beanStyleSetters(m)); - accessors.addAll(accessorsFactory.convenienceSetters(m, builderInterfaceName())); + accessors.add(checkDeprecated(m, accessorsFactory.beanStyleGetter(m))); + accessors.addAll(checkDeprecated(m, accessorsFactory.beanStyleSetters(m))); + accessors.addAll(checkDeprecated(m, accessorsFactory.fluentSetters(m, builderInterfaceName()))); + accessors.addAll(checkDeprecated(m, accessorsFactory.convenienceSetters(m, builderInterfaceName()))); }); if (isException()) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/MarshallerSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/MarshallerSpec.java index 3024048cd15c..8d62120355e4 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/MarshallerSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/MarshallerSpec.java @@ -117,7 +117,6 @@ private MarshallerProtocolSpec getProtocolSpecs(software.amazon.awssdk.codegen.m switch (protocol) { case REST_JSON: case CBOR: - case ION: case AWS_JSON: return getJsonMarshallerSpec(); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java index 120227faa29f..74c942f68cb6 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java @@ -40,7 +40,7 @@ public void baseClientBuilderClass() throws Exception { @Test public void baseClientBuilderClassWithInternalUserAgent() throws Exception { - assertThat(new 
BaseClientBuilderClass(ClientTestModels.internalConfigModels()), generatesTo("test-client-builder-internal-user-agent-class.java")); + assertThat(new BaseClientBuilderClass(ClientTestModels.internalConfigModels()), generatesTo("test-client-builder-internal-defaults-class.java")); } @Test diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-user-agent-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java similarity index 90% rename from codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-user-agent-class.java rename to codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java index 024ff4687e89..39f2c5697bb6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-user-agent-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java @@ -10,6 +10,7 @@ import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.utils.CollectionUtils; @@ -37,7 +38,10 @@ protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfigurati @Override protected final SdkClientConfiguration mergeInternalDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.INTERNAL_USER_AGENT, "md/foobar")); + return config.merge(c -> { + c.option(SdkClientOption.INTERNAL_USER_AGENT, "md/foobar"); + c.option(SdkClientOption.DEFAULT_RETRY_MODE, RetryMode.STANDARD); + }); } @Override diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/internalconfig/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/internalconfig/customization.config index 687ea6c7bbad..b4e783add53d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/internalconfig/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/internalconfig/customization.config @@ -2,5 +2,6 @@ "authPolicyActions" : { "skip" : true }, - "userAgent": "md/foobar" + "userAgent": "md/foobar", + "defaultRetryMode": "STANDARD" } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java index 53f9ad25e81b..6dbe9bceb13c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.nio.ByteBuffer; import java.time.Instant; import java.util.Arrays; @@ -424,9 +425,8 @@ SdkField. builder(MarshallingType.SDK_BYTES) .build(); private static final SdkField MY_DOCUMENT_FIELD = SdkField. 
builder(MarshallingType.DOCUMENT) - .memberName("MyDocument").getter(getter(AllTypesRequest::myDocument)).setter(setter(Builder::myDocument)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MyDocument").build()).build(); - + .memberName("MyDocument").getter(getter(AllTypesRequest::myDocument)).setter(setter(Builder::myDocument)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MyDocument").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(STRING_MEMBER_FIELD, INTEGER_MEMBER_FIELD, BOOLEAN_MEMBER_FIELD, FLOAT_MEMBER_FIELD, DOUBLE_MEMBER_FIELD, LONG_MEMBER_FIELD, @@ -507,7 +507,6 @@ SdkField. builder(MarshallingType.SDK_BYTES) private final Document myDocument; - private AllTypesRequest(BuilderImpl builder) { super(builder); this.stringMember = builder.stringMember; @@ -1393,9 +1392,9 @@ public final String toString() { .add("StructWithNestedTimestampMember", structWithNestedTimestampMember()).add("BlobArg", blobArg()) .add("StructWithNestedBlob", structWithNestedBlob()).add("BlobMap", hasBlobMap() ? blobMap() : null) .add("ListOfBlobs", hasListOfBlobs() ? 
listOfBlobs() : null).add("RecursiveStruct", recursiveStruct()) - .add("PolymorphicTypeWithSubTypes", polymorphicTypeWithSubTypes()) - .add("PolymorphicTypeWithoutSubTypes", polymorphicTypeWithoutSubTypes()).add("EnumType", enumTypeAsString()) - .add("Underscore_Name_Type", underscore_Name_Type()).add("MyDocument", myDocument()).build(); + .add("PolymorphicTypeWithSubTypes", polymorphicTypeWithSubTypes()) + .add("PolymorphicTypeWithoutSubTypes", polymorphicTypeWithoutSubTypes()).add("EnumType", enumTypeAsString()) + .add("Underscore_Name_Type", underscore_Name_Type()).add("MyDocument", myDocument()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -2187,98 +2186,105 @@ public final String getStringMember() { return stringMember; } - @Override - public final Builder stringMember(String stringMember) { + public final void setStringMember(String stringMember) { this.stringMember = stringMember; - return this; } - public final void setStringMember(String stringMember) { + @Override + @Transient + public final Builder stringMember(String stringMember) { this.stringMember = stringMember; + return this; } public final Integer getIntegerMember() { return integerMember; } - @Override - public final Builder integerMember(Integer integerMember) { + public final void setIntegerMember(Integer integerMember) { this.integerMember = integerMember; - return this; } - public final void setIntegerMember(Integer integerMember) { + @Override + @Transient + public final Builder integerMember(Integer integerMember) { this.integerMember = integerMember; + return this; } public final Boolean getBooleanMember() { return booleanMember; } - @Override - public final Builder booleanMember(Boolean booleanMember) { + public final void setBooleanMember(Boolean booleanMember) { this.booleanMember = booleanMember; - return this; } - public final void setBooleanMember(Boolean booleanMember) { + @Override + @Transient + public final Builder booleanMember(Boolean 
booleanMember) { this.booleanMember = booleanMember; + return this; } public final Float getFloatMember() { return floatMember; } - @Override - public final Builder floatMember(Float floatMember) { + public final void setFloatMember(Float floatMember) { this.floatMember = floatMember; - return this; } - public final void setFloatMember(Float floatMember) { + @Override + @Transient + public final Builder floatMember(Float floatMember) { this.floatMember = floatMember; + return this; } public final Double getDoubleMember() { return doubleMember; } - @Override - public final Builder doubleMember(Double doubleMember) { + public final void setDoubleMember(Double doubleMember) { this.doubleMember = doubleMember; - return this; } - public final void setDoubleMember(Double doubleMember) { + @Override + @Transient + public final Builder doubleMember(Double doubleMember) { this.doubleMember = doubleMember; + return this; } public final Long getLongMember() { return longMember; } - @Override - public final Builder longMember(Long longMember) { + public final void setLongMember(Long longMember) { this.longMember = longMember; - return this; } - public final void setLongMember(Long longMember) { + @Override + @Transient + public final Builder longMember(Long longMember) { this.longMember = longMember; + return this; } public final Short getShortMember() { return shortMember; } - @Override - public final Builder shortMember(Short shortMember) { + public final void setShortMember(Short shortMember) { this.shortMember = shortMember; - return this; } - public final void setShortMember(Short shortMember) { + @Override + @Transient + public final Builder shortMember(Short shortMember) { this.shortMember = shortMember; + return this; } public final Collection getSimpleList() { @@ -2288,23 +2294,25 @@ public final Collection getSimpleList() { return simpleList; } + public final void setSimpleList(Collection simpleList) { + this.simpleList = ListOfStringsCopier.copy(simpleList); + } + 
@Override + @Transient public final Builder simpleList(Collection simpleList) { this.simpleList = ListOfStringsCopier.copy(simpleList); return this; } @Override + @Transient @SafeVarargs public final Builder simpleList(String... simpleList) { simpleList(Arrays.asList(simpleList)); return this; } - public final void setSimpleList(Collection simpleList) { - this.simpleList = ListOfStringsCopier.copy(simpleList); - } - public final Collection getListOfEnums() { if (listOfEnums instanceof SdkAutoConstructList) { return null; @@ -2312,13 +2320,19 @@ public final Collection getListOfEnums() { return listOfEnums; } + public final void setListOfEnums(Collection listOfEnums) { + this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); + } + @Override + @Transient public final Builder listOfEnumsWithStrings(Collection listOfEnums) { this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); return this; } @Override + @Transient @SafeVarargs public final Builder listOfEnumsWithStrings(String... listOfEnums) { listOfEnumsWithStrings(Arrays.asList(listOfEnums)); @@ -2326,22 +2340,20 @@ public final Builder listOfEnumsWithStrings(String... listOfEnums) { } @Override + @Transient public final Builder listOfEnums(Collection listOfEnums) { this.listOfEnums = ListOfEnumsCopier.copyEnumToString(listOfEnums); return this; } @Override + @Transient @SafeVarargs public final Builder listOfEnums(EnumType... 
listOfEnums) { listOfEnums(Arrays.asList(listOfEnums)); return this; } - public final void setListOfEnums(Collection listOfEnums) { - this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); - } - public final Collection> getListOfMaps() { if (listOfMaps instanceof SdkAutoConstructList) { return null; @@ -2349,23 +2361,25 @@ public final Collection> getListOfMaps() { return listOfMaps; } + public final void setListOfMaps(Collection> listOfMaps) { + this.listOfMaps = ListOfMapStringToStringCopier.copy(listOfMaps); + } + @Override + @Transient public final Builder listOfMaps(Collection> listOfMaps) { this.listOfMaps = ListOfMapStringToStringCopier.copy(listOfMaps); return this; } @Override + @Transient @SafeVarargs public final Builder listOfMaps(Map... listOfMaps) { listOfMaps(Arrays.asList(listOfMaps)); return this; } - public final void setListOfMaps(Collection> listOfMaps) { - this.listOfMaps = ListOfMapStringToStringCopier.copy(listOfMaps); - } - public final List getListOfStructs() { List result = ListOfSimpleStructsCopier.copyToBuilder(this.listOfStructs); if (result instanceof SdkAutoConstructList) { @@ -2374,13 +2388,19 @@ public final List getListOfStructs() { return result; } + public final void setListOfStructs(Collection listOfStructs) { + this.listOfStructs = ListOfSimpleStructsCopier.copyFromBuilder(listOfStructs); + } + @Override + @Transient public final Builder listOfStructs(Collection listOfStructs) { this.listOfStructs = ListOfSimpleStructsCopier.copy(listOfStructs); return this; } @Override + @Transient @SafeVarargs public final Builder listOfStructs(SimpleStruct... listOfStructs) { listOfStructs(Arrays.asList(listOfStructs)); @@ -2388,6 +2408,7 @@ public final Builder listOfStructs(SimpleStruct... listOfStructs) { } @Override + @Transient @SafeVarargs public final Builder listOfStructs(Consumer... 
listOfStructs) { listOfStructs(Stream.of(listOfStructs).map(c -> SimpleStruct.builder().applyMutation(c).build()) @@ -2395,10 +2416,6 @@ public final Builder listOfStructs(Consumer... listOfStruc return this; } - public final void setListOfStructs(Collection listOfStructs) { - this.listOfStructs = ListOfSimpleStructsCopier.copyFromBuilder(listOfStructs); - } - public final Collection> getListOfMapOfEnumToString() { if (listOfMapOfEnumToString instanceof SdkAutoConstructList) { return null; @@ -2406,23 +2423,25 @@ public final Collection> getListOfMapOfEnumToStrin return listOfMapOfEnumToString; } + public final void setListOfMapOfEnumToString(Collection> listOfMapOfEnumToString) { + this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); + } + @Override + @Transient public final Builder listOfMapOfEnumToStringWithStrings(Collection> listOfMapOfEnumToString) { this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); return this; } @Override + @Transient @SafeVarargs public final Builder listOfMapOfEnumToStringWithStrings(Map... 
listOfMapOfEnumToString) { listOfMapOfEnumToStringWithStrings(Arrays.asList(listOfMapOfEnumToString)); return this; } - public final void setListOfMapOfEnumToString(Collection> listOfMapOfEnumToString) { - this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); - } - public final List> getListOfMapOfStringToStruct() { List> result = ListOfMapOfStringToStructCopier .copyToBuilder(this.listOfMapOfStringToStruct); @@ -2432,24 +2451,26 @@ public final List> getListOfMapOfStringToStruc return result; } + public final void setListOfMapOfStringToStruct( + Collection> listOfMapOfStringToStruct) { + this.listOfMapOfStringToStruct = ListOfMapOfStringToStructCopier.copyFromBuilder(listOfMapOfStringToStruct); + } + @Override + @Transient public final Builder listOfMapOfStringToStruct(Collection> listOfMapOfStringToStruct) { this.listOfMapOfStringToStruct = ListOfMapOfStringToStructCopier.copy(listOfMapOfStringToStruct); return this; } @Override + @Transient @SafeVarargs public final Builder listOfMapOfStringToStruct(Map... 
listOfMapOfStringToStruct) { listOfMapOfStringToStruct(Arrays.asList(listOfMapOfStringToStruct)); return this; } - public final void setListOfMapOfStringToStruct( - Collection> listOfMapOfStringToStruct) { - this.listOfMapOfStringToStruct = ListOfMapOfStringToStructCopier.copyFromBuilder(listOfMapOfStringToStruct); - } - public final Map> getMapOfStringToIntegerList() { if (mapOfStringToIntegerList instanceof SdkAutoConstructMap) { return null; @@ -2457,14 +2478,15 @@ public final void setListOfMapOfStringToStruct( return mapOfStringToIntegerList; } - @Override - public final Builder mapOfStringToIntegerList(Map> mapOfStringToIntegerList) { + public final void setMapOfStringToIntegerList(Map> mapOfStringToIntegerList) { this.mapOfStringToIntegerList = MapOfStringToIntegerListCopier.copy(mapOfStringToIntegerList); - return this; } - public final void setMapOfStringToIntegerList(Map> mapOfStringToIntegerList) { + @Override + @Transient + public final Builder mapOfStringToIntegerList(Map> mapOfStringToIntegerList) { this.mapOfStringToIntegerList = MapOfStringToIntegerListCopier.copy(mapOfStringToIntegerList); + return this; } public final Map getMapOfStringToString() { @@ -2474,14 +2496,15 @@ public final Map getMapOfStringToString() { return mapOfStringToString; } - @Override - public final Builder mapOfStringToString(Map mapOfStringToString) { + public final void setMapOfStringToString(Map mapOfStringToString) { this.mapOfStringToString = MapOfStringToStringCopier.copy(mapOfStringToString); - return this; } - public final void setMapOfStringToString(Map mapOfStringToString) { + @Override + @Transient + public final Builder mapOfStringToString(Map mapOfStringToString) { this.mapOfStringToString = MapOfStringToStringCopier.copy(mapOfStringToString); + return this; } public final Map getMapOfStringToSimpleStruct() { @@ -2493,16 +2516,17 @@ public final Map getMapOfStringToSimpleStruct() { return result; } + public final void setMapOfStringToSimpleStruct(Map 
mapOfStringToSimpleStruct) { + this.mapOfStringToSimpleStruct = MapOfStringToSimpleStructCopier.copyFromBuilder(mapOfStringToSimpleStruct); + } + @Override + @Transient public final Builder mapOfStringToSimpleStruct(Map mapOfStringToSimpleStruct) { this.mapOfStringToSimpleStruct = MapOfStringToSimpleStructCopier.copy(mapOfStringToSimpleStruct); return this; } - public final void setMapOfStringToSimpleStruct(Map mapOfStringToSimpleStruct) { - this.mapOfStringToSimpleStruct = MapOfStringToSimpleStructCopier.copyFromBuilder(mapOfStringToSimpleStruct); - } - public final Map getMapOfEnumToEnum() { if (mapOfEnumToEnum instanceof SdkAutoConstructMap) { return null; @@ -2510,22 +2534,24 @@ public final Map getMapOfEnumToEnum() { return mapOfEnumToEnum; } + public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { + this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); + } + @Override + @Transient public final Builder mapOfEnumToEnumWithStrings(Map mapOfEnumToEnum) { this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); return this; } @Override + @Transient public final Builder mapOfEnumToEnum(Map mapOfEnumToEnum) { this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copyEnumToString(mapOfEnumToEnum); return this; } - public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { - this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); - } - public final Map getMapOfEnumToString() { if (mapOfEnumToString instanceof SdkAutoConstructMap) { return null; @@ -2533,22 +2559,24 @@ public final Map getMapOfEnumToString() { return mapOfEnumToString; } + public final void setMapOfEnumToString(Map mapOfEnumToString) { + this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); + } + @Override + @Transient public final Builder mapOfEnumToStringWithStrings(Map mapOfEnumToString) { this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); return this; } @Override + @Transient public final Builder mapOfEnumToString(Map 
mapOfEnumToString) { this.mapOfEnumToString = MapOfEnumToStringCopier.copyEnumToString(mapOfEnumToString); return this; } - public final void setMapOfEnumToString(Map mapOfEnumToString) { - this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); - } - public final Map getMapOfStringToEnum() { if (mapOfStringToEnum instanceof SdkAutoConstructMap) { return null; @@ -2556,22 +2584,24 @@ public final Map getMapOfStringToEnum() { return mapOfStringToEnum; } + public final void setMapOfStringToEnum(Map mapOfStringToEnum) { + this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); + } + @Override + @Transient public final Builder mapOfStringToEnumWithStrings(Map mapOfStringToEnum) { this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); return this; } @Override + @Transient public final Builder mapOfStringToEnum(Map mapOfStringToEnum) { this.mapOfStringToEnum = MapOfStringToEnumCopier.copyEnumToString(mapOfStringToEnum); return this; } - public final void setMapOfStringToEnum(Map mapOfStringToEnum) { - this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); - } - public final Map getMapOfEnumToSimpleStruct() { Map result = MapOfEnumToSimpleStructCopier.copyToBuilder(this.mapOfEnumToSimpleStruct); if (result instanceof SdkAutoConstructMap) { @@ -2580,22 +2610,24 @@ public final Map getMapOfEnumToSimpleStruct() { return result; } + public final void setMapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { + this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copyFromBuilder(mapOfEnumToSimpleStruct); + } + @Override + @Transient public final Builder mapOfEnumToSimpleStructWithStrings(Map mapOfEnumToSimpleStruct) { this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copy(mapOfEnumToSimpleStruct); return this; } @Override + @Transient public final Builder mapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { this.mapOfEnumToSimpleStruct = 
MapOfEnumToSimpleStructCopier.copyEnumToString(mapOfEnumToSimpleStruct); return this; } - public final void setMapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { - this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copyFromBuilder(mapOfEnumToSimpleStruct); - } - public final Map> getMapOfEnumToListOfEnums() { if (mapOfEnumToListOfEnums instanceof SdkAutoConstructMap) { return null; @@ -2603,22 +2635,24 @@ public final void setMapOfEnumToSimpleStruct(Map> mapOfEnumToListOfEnums) { + this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); + } + @Override + @Transient public final Builder mapOfEnumToListOfEnumsWithStrings(Map> mapOfEnumToListOfEnums) { this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); return this; } @Override + @Transient public final Builder mapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copyEnumToString(mapOfEnumToListOfEnums); return this; } - public final void setMapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { - this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); - } - public final Map> getMapOfEnumToMapOfStringToEnum() { if (mapOfEnumToMapOfStringToEnum instanceof SdkAutoConstructMap) { return null; @@ -2626,7 +2660,12 @@ public final void setMapOfEnumToListOfEnums(Map> mapOfEnumToMapOfStringToEnum) { + this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); + } + @Override + @Transient public final Builder mapOfEnumToMapOfStringToEnumWithStrings( Map> mapOfEnumToMapOfStringToEnum) { this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); @@ -2634,73 +2673,74 @@ public final Builder mapOfEnumToMapOfStringToEnumWithStrings( } @Override + @Transient public final Builder mapOfEnumToMapOfStringToEnum( Map> mapOfEnumToMapOfStringToEnum) { 
this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copyEnumToString(mapOfEnumToMapOfStringToEnum); return this; } - public final void setMapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { - this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); - } - public final Instant getTimestampMember() { return timestampMember; } - @Override - public final Builder timestampMember(Instant timestampMember) { + public final void setTimestampMember(Instant timestampMember) { this.timestampMember = timestampMember; - return this; } - public final void setTimestampMember(Instant timestampMember) { + @Override + @Transient + public final Builder timestampMember(Instant timestampMember) { this.timestampMember = timestampMember; + return this; } public final StructWithTimestamp.Builder getStructWithNestedTimestampMember() { return structWithNestedTimestampMember != null ? structWithNestedTimestampMember.toBuilder() : null; } + public final void setStructWithNestedTimestampMember(StructWithTimestamp.BuilderImpl structWithNestedTimestampMember) { + this.structWithNestedTimestampMember = structWithNestedTimestampMember != null ? structWithNestedTimestampMember + .build() : null; + } + @Override + @Transient public final Builder structWithNestedTimestampMember(StructWithTimestamp structWithNestedTimestampMember) { this.structWithNestedTimestampMember = structWithNestedTimestampMember; return this; } - public final void setStructWithNestedTimestampMember(StructWithTimestamp.BuilderImpl structWithNestedTimestampMember) { - this.structWithNestedTimestampMember = structWithNestedTimestampMember != null ? structWithNestedTimestampMember - .build() : null; - } - public final ByteBuffer getBlobArg() { return blobArg == null ? null : blobArg.asByteBuffer(); } + public final void setBlobArg(ByteBuffer blobArg) { + blobArg(blobArg == null ? 
null : SdkBytes.fromByteBuffer(blobArg)); + } + @Override + @Transient public final Builder blobArg(SdkBytes blobArg) { this.blobArg = blobArg; return this; } - public final void setBlobArg(ByteBuffer blobArg) { - blobArg(blobArg == null ? null : SdkBytes.fromByteBuffer(blobArg)); - } - public final StructWithNestedBlobType.Builder getStructWithNestedBlob() { return structWithNestedBlob != null ? structWithNestedBlob.toBuilder() : null; } + public final void setStructWithNestedBlob(StructWithNestedBlobType.BuilderImpl structWithNestedBlob) { + this.structWithNestedBlob = structWithNestedBlob != null ? structWithNestedBlob.build() : null; + } + @Override + @Transient public final Builder structWithNestedBlob(StructWithNestedBlobType structWithNestedBlob) { this.structWithNestedBlob = structWithNestedBlob; return this; } - public final void setStructWithNestedBlob(StructWithNestedBlobType.BuilderImpl structWithNestedBlob) { - this.structWithNestedBlob = structWithNestedBlob != null ? structWithNestedBlob.build() : null; - } - public final Map getBlobMap() { if (blobMap instanceof SdkAutoConstructMap) { return null; @@ -2709,17 +2749,18 @@ public final Map getBlobMap() { .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().asByteBuffer())); } + public final void setBlobMap(Map blobMap) { + blobMap(blobMap == null ? null : blobMap.entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> SdkBytes.fromByteBuffer(e.getValue())))); + } + @Override + @Transient public final Builder blobMap(Map blobMap) { this.blobMap = BlobMapTypeCopier.copy(blobMap); return this; } - public final void setBlobMap(Map blobMap) { - blobMap(blobMap == null ? 
null : blobMap.entrySet().stream() - .collect(Collectors.toMap(e -> e.getKey(), e -> SdkBytes.fromByteBuffer(e.getValue())))); - } - public final List getListOfBlobs() { if (listOfBlobs instanceof SdkAutoConstructList) { return null; @@ -2727,113 +2768,122 @@ public final List getListOfBlobs() { return listOfBlobs == null ? null : listOfBlobs.stream().map(SdkBytes::asByteBuffer).collect(Collectors.toList()); } + public final void setListOfBlobs(Collection listOfBlobs) { + listOfBlobs(listOfBlobs == null ? null : listOfBlobs.stream().map(SdkBytes::fromByteBuffer) + .collect(Collectors.toList())); + } + @Override + @Transient public final Builder listOfBlobs(Collection listOfBlobs) { this.listOfBlobs = ListOfBlobsTypeCopier.copy(listOfBlobs); return this; } @Override + @Transient @SafeVarargs public final Builder listOfBlobs(SdkBytes... listOfBlobs) { listOfBlobs(Arrays.asList(listOfBlobs)); return this; } - public final void setListOfBlobs(Collection listOfBlobs) { - listOfBlobs(listOfBlobs == null ? null : listOfBlobs.stream().map(SdkBytes::fromByteBuffer) - .collect(Collectors.toList())); - } - public final RecursiveStructType.Builder getRecursiveStruct() { return recursiveStruct != null ? recursiveStruct.toBuilder() : null; } + public final void setRecursiveStruct(RecursiveStructType.BuilderImpl recursiveStruct) { + this.recursiveStruct = recursiveStruct != null ? recursiveStruct.build() : null; + } + @Override + @Transient public final Builder recursiveStruct(RecursiveStructType recursiveStruct) { this.recursiveStruct = recursiveStruct; return this; } - public final void setRecursiveStruct(RecursiveStructType.BuilderImpl recursiveStruct) { - this.recursiveStruct = recursiveStruct != null ? recursiveStruct.build() : null; - } - public final BaseType.Builder getPolymorphicTypeWithSubTypes() { return polymorphicTypeWithSubTypes != null ? 
polymorphicTypeWithSubTypes.toBuilder() : null; } + public final void setPolymorphicTypeWithSubTypes(BaseType.BuilderImpl polymorphicTypeWithSubTypes) { + this.polymorphicTypeWithSubTypes = polymorphicTypeWithSubTypes != null ? polymorphicTypeWithSubTypes.build() : null; + } + @Override + @Transient public final Builder polymorphicTypeWithSubTypes(BaseType polymorphicTypeWithSubTypes) { this.polymorphicTypeWithSubTypes = polymorphicTypeWithSubTypes; return this; } - public final void setPolymorphicTypeWithSubTypes(BaseType.BuilderImpl polymorphicTypeWithSubTypes) { - this.polymorphicTypeWithSubTypes = polymorphicTypeWithSubTypes != null ? polymorphicTypeWithSubTypes.build() : null; - } - public final SubTypeOne.Builder getPolymorphicTypeWithoutSubTypes() { return polymorphicTypeWithoutSubTypes != null ? polymorphicTypeWithoutSubTypes.toBuilder() : null; } + public final void setPolymorphicTypeWithoutSubTypes(SubTypeOne.BuilderImpl polymorphicTypeWithoutSubTypes) { + this.polymorphicTypeWithoutSubTypes = polymorphicTypeWithoutSubTypes != null ? polymorphicTypeWithoutSubTypes.build() + : null; + } + @Override + @Transient public final Builder polymorphicTypeWithoutSubTypes(SubTypeOne polymorphicTypeWithoutSubTypes) { this.polymorphicTypeWithoutSubTypes = polymorphicTypeWithoutSubTypes; return this; } - public final void setPolymorphicTypeWithoutSubTypes(SubTypeOne.BuilderImpl polymorphicTypeWithoutSubTypes) { - this.polymorphicTypeWithoutSubTypes = polymorphicTypeWithoutSubTypes != null ? polymorphicTypeWithoutSubTypes.build() - : null; - } - public final String getEnumType() { return enumType; } + public final void setEnumType(String enumType) { + this.enumType = enumType; + } + @Override + @Transient public final Builder enumType(String enumType) { this.enumType = enumType; return this; } @Override + @Transient public final Builder enumType(EnumType enumType) { this.enumType(enumType == null ? 
null : enumType.toString()); return this; } - public final void setEnumType(String enumType) { - this.enumType = enumType; - } - public final Underscore_Name_Type.Builder getUnderscore_Name_Type() { return underscore_Name_Type != null ? underscore_Name_Type.toBuilder() : null; } + public final void setUnderscore_Name_Type(Underscore_Name_Type.BuilderImpl underscore_Name_Type) { + this.underscore_Name_Type = underscore_Name_Type != null ? underscore_Name_Type.build() : null; + } + @Override + @Transient public final Builder underscore_Name_Type(Underscore_Name_Type underscore_Name_Type) { this.underscore_Name_Type = underscore_Name_Type; return this; } - public final void setUnderscore_Name_Type(Underscore_Name_Type.BuilderImpl underscore_Name_Type) { - this.underscore_Name_Type = underscore_Name_Type != null ? underscore_Name_Type.build() : null; - } - public final Document getMyDocument() { return myDocument; } - @Override - public final Builder myDocument(Document myDocument) { + public final void setMyDocument(Document myDocument) { this.myDocument = myDocument; - return this; } - public final void setMyDocument(Document myDocument) { + @Override + @Transient + public final Builder myDocument(Document myDocument) { this.myDocument = myDocument; + return this; } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java index 04e1d2cc1ddd..e4d67881b89e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.nio.ByteBuffer; import java.time.Instant; import java.util.Arrays; @@ -423,8 +424,8 @@ SdkField. 
builder(MarshallingType.SDK_BYTES) .build(); private static final SdkField MY_DOCUMENT_FIELD = SdkField. builder(MarshallingType.DOCUMENT) - .memberName("MyDocument").getter(getter(AllTypesResponse::myDocument)).setter(setter(Builder::myDocument)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MyDocument").build()).build(); + .memberName("MyDocument").getter(getter(AllTypesResponse::myDocument)).setter(setter(Builder::myDocument)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MyDocument").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(STRING_MEMBER_FIELD, INTEGER_MEMBER_FIELD, BOOLEAN_MEMBER_FIELD, FLOAT_MEMBER_FIELD, DOUBLE_MEMBER_FIELD, LONG_MEMBER_FIELD, @@ -1278,7 +1279,7 @@ public final int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToSimpleStruct() ? mapOfEnumToSimpleStructAsStrings() : null); hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToListOfEnums() ? mapOfEnumToListOfEnumsAsStrings() : null); hashCode = 31 * hashCode - + Objects.hashCode(hasMapOfEnumToMapOfStringToEnum() ? mapOfEnumToMapOfStringToEnumAsStrings() : null); + + Objects.hashCode(hasMapOfEnumToMapOfStringToEnum() ? 
mapOfEnumToMapOfStringToEnumAsStrings() : null); hashCode = 31 * hashCode + Objects.hashCode(timestampMember()); hashCode = 31 * hashCode + Objects.hashCode(structWithNestedTimestampMember()); hashCode = 31 * hashCode + Objects.hashCode(blobArg()); @@ -1351,8 +1352,8 @@ && hasMapOfEnumToMapOfStringToEnum() == other.hasMapOfEnumToMapOfStringToEnum() && Objects.equals(polymorphicTypeWithSubTypes(), other.polymorphicTypeWithSubTypes()) && Objects.equals(polymorphicTypeWithoutSubTypes(), other.polymorphicTypeWithoutSubTypes()) && Objects.equals(enumTypeAsString(), other.enumTypeAsString()) - && Objects.equals(underscore_Name_Type(), other.underscore_Name_Type()) - && Objects.equals(myDocument(), other.myDocument()); + && Objects.equals(underscore_Name_Type(), other.underscore_Name_Type()) + && Objects.equals(myDocument(), other.myDocument()); } /** @@ -1392,7 +1393,7 @@ public final String toString() { .add("ListOfBlobs", hasListOfBlobs() ? listOfBlobs() : null).add("RecursiveStruct", recursiveStruct()) .add("PolymorphicTypeWithSubTypes", polymorphicTypeWithSubTypes()) .add("PolymorphicTypeWithoutSubTypes", polymorphicTypeWithoutSubTypes()).add("EnumType", enumTypeAsString()) - .add("Underscore_Name_Type", underscore_Name_Type()).add("MyDocument", myDocument()).build(); + .add("Underscore_Name_Type", underscore_Name_Type()).add("MyDocument", myDocument()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -2178,98 +2179,105 @@ public final String getStringMember() { return stringMember; } - @Override - public final Builder stringMember(String stringMember) { + public final void setStringMember(String stringMember) { this.stringMember = stringMember; - return this; } - public final void setStringMember(String stringMember) { + @Override + @Transient + public final Builder stringMember(String stringMember) { this.stringMember = stringMember; + return this; } public final Integer getIntegerMember() { return integerMember; } - @Override - 
public final Builder integerMember(Integer integerMember) { + public final void setIntegerMember(Integer integerMember) { this.integerMember = integerMember; - return this; } - public final void setIntegerMember(Integer integerMember) { + @Override + @Transient + public final Builder integerMember(Integer integerMember) { this.integerMember = integerMember; + return this; } public final Boolean getBooleanMember() { return booleanMember; } - @Override - public final Builder booleanMember(Boolean booleanMember) { + public final void setBooleanMember(Boolean booleanMember) { this.booleanMember = booleanMember; - return this; } - public final void setBooleanMember(Boolean booleanMember) { + @Override + @Transient + public final Builder booleanMember(Boolean booleanMember) { this.booleanMember = booleanMember; + return this; } public final Float getFloatMember() { return floatMember; } - @Override - public final Builder floatMember(Float floatMember) { + public final void setFloatMember(Float floatMember) { this.floatMember = floatMember; - return this; } - public final void setFloatMember(Float floatMember) { + @Override + @Transient + public final Builder floatMember(Float floatMember) { this.floatMember = floatMember; + return this; } public final Double getDoubleMember() { return doubleMember; } - @Override - public final Builder doubleMember(Double doubleMember) { + public final void setDoubleMember(Double doubleMember) { this.doubleMember = doubleMember; - return this; } - public final void setDoubleMember(Double doubleMember) { + @Override + @Transient + public final Builder doubleMember(Double doubleMember) { this.doubleMember = doubleMember; + return this; } public final Long getLongMember() { return longMember; } - @Override - public final Builder longMember(Long longMember) { + public final void setLongMember(Long longMember) { this.longMember = longMember; - return this; } - public final void setLongMember(Long longMember) { + @Override + @Transient + public 
final Builder longMember(Long longMember) { this.longMember = longMember; + return this; } public final Short getShortMember() { return shortMember; } - @Override - public final Builder shortMember(Short shortMember) { + public final void setShortMember(Short shortMember) { this.shortMember = shortMember; - return this; } - public final void setShortMember(Short shortMember) { + @Override + @Transient + public final Builder shortMember(Short shortMember) { this.shortMember = shortMember; + return this; } public final Collection getSimpleList() { @@ -2279,23 +2287,25 @@ public final Collection getSimpleList() { return simpleList; } + public final void setSimpleList(Collection simpleList) { + this.simpleList = ListOfStringsCopier.copy(simpleList); + } + @Override + @Transient public final Builder simpleList(Collection simpleList) { this.simpleList = ListOfStringsCopier.copy(simpleList); return this; } @Override + @Transient @SafeVarargs public final Builder simpleList(String... simpleList) { simpleList(Arrays.asList(simpleList)); return this; } - public final void setSimpleList(Collection simpleList) { - this.simpleList = ListOfStringsCopier.copy(simpleList); - } - public final Collection getListOfEnums() { if (listOfEnums instanceof SdkAutoConstructList) { return null; @@ -2303,13 +2313,19 @@ public final Collection getListOfEnums() { return listOfEnums; } + public final void setListOfEnums(Collection listOfEnums) { + this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); + } + @Override + @Transient public final Builder listOfEnumsWithStrings(Collection listOfEnums) { this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); return this; } @Override + @Transient @SafeVarargs public final Builder listOfEnumsWithStrings(String... listOfEnums) { listOfEnumsWithStrings(Arrays.asList(listOfEnums)); @@ -2317,22 +2333,20 @@ public final Builder listOfEnumsWithStrings(String... 
listOfEnums) { } @Override + @Transient public final Builder listOfEnums(Collection listOfEnums) { this.listOfEnums = ListOfEnumsCopier.copyEnumToString(listOfEnums); return this; } @Override + @Transient @SafeVarargs public final Builder listOfEnums(EnumType... listOfEnums) { listOfEnums(Arrays.asList(listOfEnums)); return this; } - public final void setListOfEnums(Collection listOfEnums) { - this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); - } - public final Collection> getListOfMaps() { if (listOfMaps instanceof SdkAutoConstructList) { return null; @@ -2340,23 +2354,25 @@ public final Collection> getListOfMaps() { return listOfMaps; } + public final void setListOfMaps(Collection> listOfMaps) { + this.listOfMaps = ListOfMapStringToStringCopier.copy(listOfMaps); + } + @Override + @Transient public final Builder listOfMaps(Collection> listOfMaps) { this.listOfMaps = ListOfMapStringToStringCopier.copy(listOfMaps); return this; } @Override + @Transient @SafeVarargs public final Builder listOfMaps(Map... listOfMaps) { listOfMaps(Arrays.asList(listOfMaps)); return this; } - public final void setListOfMaps(Collection> listOfMaps) { - this.listOfMaps = ListOfMapStringToStringCopier.copy(listOfMaps); - } - public final List getListOfStructs() { List result = ListOfSimpleStructsCopier.copyToBuilder(this.listOfStructs); if (result instanceof SdkAutoConstructList) { @@ -2365,13 +2381,19 @@ public final List getListOfStructs() { return result; } + public final void setListOfStructs(Collection listOfStructs) { + this.listOfStructs = ListOfSimpleStructsCopier.copyFromBuilder(listOfStructs); + } + @Override + @Transient public final Builder listOfStructs(Collection listOfStructs) { this.listOfStructs = ListOfSimpleStructsCopier.copy(listOfStructs); return this; } @Override + @Transient @SafeVarargs public final Builder listOfStructs(SimpleStruct... 
listOfStructs) { listOfStructs(Arrays.asList(listOfStructs)); @@ -2379,6 +2401,7 @@ public final Builder listOfStructs(SimpleStruct... listOfStructs) { } @Override + @Transient @SafeVarargs public final Builder listOfStructs(Consumer... listOfStructs) { listOfStructs(Stream.of(listOfStructs).map(c -> SimpleStruct.builder().applyMutation(c).build()) @@ -2386,10 +2409,6 @@ public final Builder listOfStructs(Consumer... listOfStruc return this; } - public final void setListOfStructs(Collection listOfStructs) { - this.listOfStructs = ListOfSimpleStructsCopier.copyFromBuilder(listOfStructs); - } - public final Collection> getListOfMapOfEnumToString() { if (listOfMapOfEnumToString instanceof SdkAutoConstructList) { return null; @@ -2397,23 +2416,25 @@ public final Collection> getListOfMapOfEnumToStrin return listOfMapOfEnumToString; } + public final void setListOfMapOfEnumToString(Collection> listOfMapOfEnumToString) { + this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); + } + @Override + @Transient public final Builder listOfMapOfEnumToStringWithStrings(Collection> listOfMapOfEnumToString) { this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); return this; } @Override + @Transient @SafeVarargs public final Builder listOfMapOfEnumToStringWithStrings(Map... 
listOfMapOfEnumToString) { listOfMapOfEnumToStringWithStrings(Arrays.asList(listOfMapOfEnumToString)); return this; } - public final void setListOfMapOfEnumToString(Collection> listOfMapOfEnumToString) { - this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); - } - public final List> getListOfMapOfStringToStruct() { List> result = ListOfMapOfStringToStructCopier .copyToBuilder(this.listOfMapOfStringToStruct); @@ -2423,24 +2444,26 @@ public final List> getListOfMapOfStringToStruc return result; } + public final void setListOfMapOfStringToStruct( + Collection> listOfMapOfStringToStruct) { + this.listOfMapOfStringToStruct = ListOfMapOfStringToStructCopier.copyFromBuilder(listOfMapOfStringToStruct); + } + @Override + @Transient public final Builder listOfMapOfStringToStruct(Collection> listOfMapOfStringToStruct) { this.listOfMapOfStringToStruct = ListOfMapOfStringToStructCopier.copy(listOfMapOfStringToStruct); return this; } @Override + @Transient @SafeVarargs public final Builder listOfMapOfStringToStruct(Map... 
listOfMapOfStringToStruct) { listOfMapOfStringToStruct(Arrays.asList(listOfMapOfStringToStruct)); return this; } - public final void setListOfMapOfStringToStruct( - Collection> listOfMapOfStringToStruct) { - this.listOfMapOfStringToStruct = ListOfMapOfStringToStructCopier.copyFromBuilder(listOfMapOfStringToStruct); - } - public final Map> getMapOfStringToIntegerList() { if (mapOfStringToIntegerList instanceof SdkAutoConstructMap) { return null; @@ -2448,14 +2471,15 @@ public final void setListOfMapOfStringToStruct( return mapOfStringToIntegerList; } - @Override - public final Builder mapOfStringToIntegerList(Map> mapOfStringToIntegerList) { + public final void setMapOfStringToIntegerList(Map> mapOfStringToIntegerList) { this.mapOfStringToIntegerList = MapOfStringToIntegerListCopier.copy(mapOfStringToIntegerList); - return this; } - public final void setMapOfStringToIntegerList(Map> mapOfStringToIntegerList) { + @Override + @Transient + public final Builder mapOfStringToIntegerList(Map> mapOfStringToIntegerList) { this.mapOfStringToIntegerList = MapOfStringToIntegerListCopier.copy(mapOfStringToIntegerList); + return this; } public final Map getMapOfStringToString() { @@ -2465,14 +2489,15 @@ public final Map getMapOfStringToString() { return mapOfStringToString; } - @Override - public final Builder mapOfStringToString(Map mapOfStringToString) { + public final void setMapOfStringToString(Map mapOfStringToString) { this.mapOfStringToString = MapOfStringToStringCopier.copy(mapOfStringToString); - return this; } - public final void setMapOfStringToString(Map mapOfStringToString) { + @Override + @Transient + public final Builder mapOfStringToString(Map mapOfStringToString) { this.mapOfStringToString = MapOfStringToStringCopier.copy(mapOfStringToString); + return this; } public final Map getMapOfStringToSimpleStruct() { @@ -2484,16 +2509,17 @@ public final Map getMapOfStringToSimpleStruct() { return result; } + public final void setMapOfStringToSimpleStruct(Map 
mapOfStringToSimpleStruct) { + this.mapOfStringToSimpleStruct = MapOfStringToSimpleStructCopier.copyFromBuilder(mapOfStringToSimpleStruct); + } + @Override + @Transient public final Builder mapOfStringToSimpleStruct(Map mapOfStringToSimpleStruct) { this.mapOfStringToSimpleStruct = MapOfStringToSimpleStructCopier.copy(mapOfStringToSimpleStruct); return this; } - public final void setMapOfStringToSimpleStruct(Map mapOfStringToSimpleStruct) { - this.mapOfStringToSimpleStruct = MapOfStringToSimpleStructCopier.copyFromBuilder(mapOfStringToSimpleStruct); - } - public final Map getMapOfEnumToEnum() { if (mapOfEnumToEnum instanceof SdkAutoConstructMap) { return null; @@ -2501,22 +2527,24 @@ public final Map getMapOfEnumToEnum() { return mapOfEnumToEnum; } + public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { + this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); + } + @Override + @Transient public final Builder mapOfEnumToEnumWithStrings(Map mapOfEnumToEnum) { this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); return this; } @Override + @Transient public final Builder mapOfEnumToEnum(Map mapOfEnumToEnum) { this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copyEnumToString(mapOfEnumToEnum); return this; } - public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { - this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); - } - public final Map getMapOfEnumToString() { if (mapOfEnumToString instanceof SdkAutoConstructMap) { return null; @@ -2524,22 +2552,24 @@ public final Map getMapOfEnumToString() { return mapOfEnumToString; } + public final void setMapOfEnumToString(Map mapOfEnumToString) { + this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); + } + @Override + @Transient public final Builder mapOfEnumToStringWithStrings(Map mapOfEnumToString) { this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); return this; } @Override + @Transient public final Builder mapOfEnumToString(Map 
mapOfEnumToString) { this.mapOfEnumToString = MapOfEnumToStringCopier.copyEnumToString(mapOfEnumToString); return this; } - public final void setMapOfEnumToString(Map mapOfEnumToString) { - this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); - } - public final Map getMapOfStringToEnum() { if (mapOfStringToEnum instanceof SdkAutoConstructMap) { return null; @@ -2547,22 +2577,24 @@ public final Map getMapOfStringToEnum() { return mapOfStringToEnum; } + public final void setMapOfStringToEnum(Map mapOfStringToEnum) { + this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); + } + @Override + @Transient public final Builder mapOfStringToEnumWithStrings(Map mapOfStringToEnum) { this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); return this; } @Override + @Transient public final Builder mapOfStringToEnum(Map mapOfStringToEnum) { this.mapOfStringToEnum = MapOfStringToEnumCopier.copyEnumToString(mapOfStringToEnum); return this; } - public final void setMapOfStringToEnum(Map mapOfStringToEnum) { - this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); - } - public final Map getMapOfEnumToSimpleStruct() { Map result = MapOfEnumToSimpleStructCopier.copyToBuilder(this.mapOfEnumToSimpleStruct); if (result instanceof SdkAutoConstructMap) { @@ -2571,22 +2603,24 @@ public final Map getMapOfEnumToSimpleStruct() { return result; } + public final void setMapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { + this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copyFromBuilder(mapOfEnumToSimpleStruct); + } + @Override + @Transient public final Builder mapOfEnumToSimpleStructWithStrings(Map mapOfEnumToSimpleStruct) { this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copy(mapOfEnumToSimpleStruct); return this; } @Override + @Transient public final Builder mapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { this.mapOfEnumToSimpleStruct = 
MapOfEnumToSimpleStructCopier.copyEnumToString(mapOfEnumToSimpleStruct); return this; } - public final void setMapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { - this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copyFromBuilder(mapOfEnumToSimpleStruct); - } - public final Map> getMapOfEnumToListOfEnums() { if (mapOfEnumToListOfEnums instanceof SdkAutoConstructMap) { return null; @@ -2594,22 +2628,24 @@ public final void setMapOfEnumToSimpleStruct(Map> mapOfEnumToListOfEnums) { + this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); + } + @Override + @Transient public final Builder mapOfEnumToListOfEnumsWithStrings(Map> mapOfEnumToListOfEnums) { this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); return this; } @Override + @Transient public final Builder mapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copyEnumToString(mapOfEnumToListOfEnums); return this; } - public final void setMapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { - this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); - } - public final Map> getMapOfEnumToMapOfStringToEnum() { if (mapOfEnumToMapOfStringToEnum instanceof SdkAutoConstructMap) { return null; @@ -2617,7 +2653,12 @@ public final void setMapOfEnumToListOfEnums(Map> mapOfEnumToMapOfStringToEnum) { + this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); + } + @Override + @Transient public final Builder mapOfEnumToMapOfStringToEnumWithStrings( Map> mapOfEnumToMapOfStringToEnum) { this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); @@ -2625,73 +2666,74 @@ public final Builder mapOfEnumToMapOfStringToEnumWithStrings( } @Override + @Transient public final Builder mapOfEnumToMapOfStringToEnum( Map> mapOfEnumToMapOfStringToEnum) { 
this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copyEnumToString(mapOfEnumToMapOfStringToEnum); return this; } - public final void setMapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { - this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); - } - public final Instant getTimestampMember() { return timestampMember; } - @Override - public final Builder timestampMember(Instant timestampMember) { + public final void setTimestampMember(Instant timestampMember) { this.timestampMember = timestampMember; - return this; } - public final void setTimestampMember(Instant timestampMember) { + @Override + @Transient + public final Builder timestampMember(Instant timestampMember) { this.timestampMember = timestampMember; + return this; } public final StructWithTimestamp.Builder getStructWithNestedTimestampMember() { return structWithNestedTimestampMember != null ? structWithNestedTimestampMember.toBuilder() : null; } + public final void setStructWithNestedTimestampMember(StructWithTimestamp.BuilderImpl structWithNestedTimestampMember) { + this.structWithNestedTimestampMember = structWithNestedTimestampMember != null ? structWithNestedTimestampMember + .build() : null; + } + @Override + @Transient public final Builder structWithNestedTimestampMember(StructWithTimestamp structWithNestedTimestampMember) { this.structWithNestedTimestampMember = structWithNestedTimestampMember; return this; } - public final void setStructWithNestedTimestampMember(StructWithTimestamp.BuilderImpl structWithNestedTimestampMember) { - this.structWithNestedTimestampMember = structWithNestedTimestampMember != null ? structWithNestedTimestampMember - .build() : null; - } - public final ByteBuffer getBlobArg() { return blobArg == null ? null : blobArg.asByteBuffer(); } + public final void setBlobArg(ByteBuffer blobArg) { + blobArg(blobArg == null ? 
null : SdkBytes.fromByteBuffer(blobArg)); + } + @Override + @Transient public final Builder blobArg(SdkBytes blobArg) { this.blobArg = blobArg; return this; } - public final void setBlobArg(ByteBuffer blobArg) { - blobArg(blobArg == null ? null : SdkBytes.fromByteBuffer(blobArg)); - } - public final StructWithNestedBlobType.Builder getStructWithNestedBlob() { return structWithNestedBlob != null ? structWithNestedBlob.toBuilder() : null; } + public final void setStructWithNestedBlob(StructWithNestedBlobType.BuilderImpl structWithNestedBlob) { + this.structWithNestedBlob = structWithNestedBlob != null ? structWithNestedBlob.build() : null; + } + @Override + @Transient public final Builder structWithNestedBlob(StructWithNestedBlobType structWithNestedBlob) { this.structWithNestedBlob = structWithNestedBlob; return this; } - public final void setStructWithNestedBlob(StructWithNestedBlobType.BuilderImpl structWithNestedBlob) { - this.structWithNestedBlob = structWithNestedBlob != null ? structWithNestedBlob.build() : null; - } - public final Map getBlobMap() { if (blobMap instanceof SdkAutoConstructMap) { return null; @@ -2700,17 +2742,18 @@ public final Map getBlobMap() { .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().asByteBuffer())); } + public final void setBlobMap(Map blobMap) { + blobMap(blobMap == null ? null : blobMap.entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> SdkBytes.fromByteBuffer(e.getValue())))); + } + @Override + @Transient public final Builder blobMap(Map blobMap) { this.blobMap = BlobMapTypeCopier.copy(blobMap); return this; } - public final void setBlobMap(Map blobMap) { - blobMap(blobMap == null ? 
null : blobMap.entrySet().stream() - .collect(Collectors.toMap(e -> e.getKey(), e -> SdkBytes.fromByteBuffer(e.getValue())))); - } - public final List getListOfBlobs() { if (listOfBlobs instanceof SdkAutoConstructList) { return null; @@ -2718,113 +2761,122 @@ public final List getListOfBlobs() { return listOfBlobs == null ? null : listOfBlobs.stream().map(SdkBytes::asByteBuffer).collect(Collectors.toList()); } + public final void setListOfBlobs(Collection listOfBlobs) { + listOfBlobs(listOfBlobs == null ? null : listOfBlobs.stream().map(SdkBytes::fromByteBuffer) + .collect(Collectors.toList())); + } + @Override + @Transient public final Builder listOfBlobs(Collection listOfBlobs) { this.listOfBlobs = ListOfBlobsTypeCopier.copy(listOfBlobs); return this; } @Override + @Transient @SafeVarargs public final Builder listOfBlobs(SdkBytes... listOfBlobs) { listOfBlobs(Arrays.asList(listOfBlobs)); return this; } - public final void setListOfBlobs(Collection listOfBlobs) { - listOfBlobs(listOfBlobs == null ? null : listOfBlobs.stream().map(SdkBytes::fromByteBuffer) - .collect(Collectors.toList())); - } - public final RecursiveStructType.Builder getRecursiveStruct() { return recursiveStruct != null ? recursiveStruct.toBuilder() : null; } + public final void setRecursiveStruct(RecursiveStructType.BuilderImpl recursiveStruct) { + this.recursiveStruct = recursiveStruct != null ? recursiveStruct.build() : null; + } + @Override + @Transient public final Builder recursiveStruct(RecursiveStructType recursiveStruct) { this.recursiveStruct = recursiveStruct; return this; } - public final void setRecursiveStruct(RecursiveStructType.BuilderImpl recursiveStruct) { - this.recursiveStruct = recursiveStruct != null ? recursiveStruct.build() : null; - } - public final BaseType.Builder getPolymorphicTypeWithSubTypes() { return polymorphicTypeWithSubTypes != null ? 
polymorphicTypeWithSubTypes.toBuilder() : null; } + public final void setPolymorphicTypeWithSubTypes(BaseType.BuilderImpl polymorphicTypeWithSubTypes) { + this.polymorphicTypeWithSubTypes = polymorphicTypeWithSubTypes != null ? polymorphicTypeWithSubTypes.build() : null; + } + @Override + @Transient public final Builder polymorphicTypeWithSubTypes(BaseType polymorphicTypeWithSubTypes) { this.polymorphicTypeWithSubTypes = polymorphicTypeWithSubTypes; return this; } - public final void setPolymorphicTypeWithSubTypes(BaseType.BuilderImpl polymorphicTypeWithSubTypes) { - this.polymorphicTypeWithSubTypes = polymorphicTypeWithSubTypes != null ? polymorphicTypeWithSubTypes.build() : null; - } - public final SubTypeOne.Builder getPolymorphicTypeWithoutSubTypes() { return polymorphicTypeWithoutSubTypes != null ? polymorphicTypeWithoutSubTypes.toBuilder() : null; } + public final void setPolymorphicTypeWithoutSubTypes(SubTypeOne.BuilderImpl polymorphicTypeWithoutSubTypes) { + this.polymorphicTypeWithoutSubTypes = polymorphicTypeWithoutSubTypes != null ? polymorphicTypeWithoutSubTypes.build() + : null; + } + @Override + @Transient public final Builder polymorphicTypeWithoutSubTypes(SubTypeOne polymorphicTypeWithoutSubTypes) { this.polymorphicTypeWithoutSubTypes = polymorphicTypeWithoutSubTypes; return this; } - public final void setPolymorphicTypeWithoutSubTypes(SubTypeOne.BuilderImpl polymorphicTypeWithoutSubTypes) { - this.polymorphicTypeWithoutSubTypes = polymorphicTypeWithoutSubTypes != null ? polymorphicTypeWithoutSubTypes.build() - : null; - } - public final String getEnumType() { return enumType; } + public final void setEnumType(String enumType) { + this.enumType = enumType; + } + @Override + @Transient public final Builder enumType(String enumType) { this.enumType = enumType; return this; } @Override + @Transient public final Builder enumType(EnumType enumType) { this.enumType(enumType == null ? 
null : enumType.toString()); return this; } - public final void setEnumType(String enumType) { - this.enumType = enumType; - } - public final Underscore_Name_Type.Builder getUnderscore_Name_Type() { return underscore_Name_Type != null ? underscore_Name_Type.toBuilder() : null; } + public final void setUnderscore_Name_Type(Underscore_Name_Type.BuilderImpl underscore_Name_Type) { + this.underscore_Name_Type = underscore_Name_Type != null ? underscore_Name_Type.build() : null; + } + @Override + @Transient public final Builder underscore_Name_Type(Underscore_Name_Type underscore_Name_Type) { this.underscore_Name_Type = underscore_Name_Type; return this; } - public final void setUnderscore_Name_Type(Underscore_Name_Type.BuilderImpl underscore_Name_Type) { - this.underscore_Name_Type = underscore_Name_Type != null ? underscore_Name_Type.build() : null; - } - public final Document getMyDocument() { return myDocument; } - @Override - public final Builder myDocument(Document myDocument) { + public final void setMyDocument(Document myDocument) { this.myDocument = myDocument; - return this; } - public final void setMyDocument(Document myDocument) { + @Override + @Transient + public final Builder myDocument(Document myDocument) { this.myDocument = myDocument; + return this; } @Override @@ -2837,4 +2889,4 @@ public List> sdkFields() { return SDK_FIELDS; } } -} \ No newline at end of file +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/basetype.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/basetype.java index 1e40d634bf40..1f4bf1fc0664 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/basetype.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/basetype.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.util.Arrays; import java.util.Collections; @@ 
-23,8 +24,8 @@ @Generated("software.amazon.awssdk:codegen") public final class BaseType implements SdkPojo, Serializable, ToCopyableBuilder { private static final SdkField BASE_MEMBER_FIELD = SdkField. builder(MarshallingType.STRING) - .memberName("BaseMember").getter(getter(BaseType::baseMember)).setter(setter(Builder::baseMember)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("BaseMember").build()).build(); + .memberName("BaseMember").getter(getter(BaseType::baseMember)).setter(setter(Builder::baseMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("BaseMember").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BASE_MEMBER_FIELD)); @@ -38,7 +39,7 @@ private BaseType(BuilderImpl builder) { /** * Returns the value of the BaseMember property for this object. - * + * * @return The value of the BaseMember property for this object. */ public final String baseMember() { @@ -96,10 +97,10 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "BaseMember": - return Optional.ofNullable(clazz.cast(baseMember())); - default: - return Optional.empty(); + case "BaseMember": + return Optional.ofNullable(clazz.cast(baseMember())); + default: + return Optional.empty(); } } @@ -141,14 +142,15 @@ public final String getBaseMember() { return baseMember; } - @Override - public final Builder baseMember(String baseMember) { + public final void setBaseMember(String baseMember) { this.baseMember = baseMember; - return this; } - public final void setBaseMember(String baseMember) { + @Override + @Transient + public final Builder baseMember(String baseMember) { this.baseMember = baseMember; + return this; } @Override @@ -162,4 +164,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config index f03277692cc2..baa5c389b448 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config @@ -18,6 +18,16 @@ } } ] + }, + "OperationWithDeprecatedMemberRequest": { + "modify": [ + { + "MemberModifiedAsDeprecated": { + "deprecated": true, + "deprecatedMessage": "This field is modified as deprecated." + } + } + ] } }, "underscoresInNameBehavior": "ALLOW" diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenamerequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenamerequest.java index 395bd6785d9d..43f07e4d384b 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenamerequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenamerequest.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -23,20 +24,20 @@ */ @Generated("software.amazon.awssdk:codegen") public final class DeprecatedRenameRequest extends JsonProtocolTestsRequest implements - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField NEW_NAME_NO_DEPRECATION_FIELD = SdkField. 
builder(MarshallingType.STRING) - .memberName("NewNameNoDeprecation").getter(getter(DeprecatedRenameRequest::newNameNoDeprecation)) - .setter(setter(Builder::newNameNoDeprecation)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("OriginalNameNoDeprecation").build()) - .build(); + .memberName("NewNameNoDeprecation").getter(getter(DeprecatedRenameRequest::newNameNoDeprecation)) + .setter(setter(Builder::newNameNoDeprecation)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("OriginalNameNoDeprecation").build()) + .build(); private static final SdkField NEW_NAME_FIELD = SdkField. builder(MarshallingType.STRING) - .memberName("NewName").getter(getter(DeprecatedRenameRequest::newName)).setter(setter(Builder::newName)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("OriginalNameDeprecated").build()) - .build(); + .memberName("NewName").getter(getter(DeprecatedRenameRequest::newName)).setter(setter(Builder::newName)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("OriginalNameDeprecated").build()) + .build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(NEW_NAME_NO_DEPRECATION_FIELD, - NEW_NAME_FIELD)); + NEW_NAME_FIELD)); private final String newNameNoDeprecation; @@ -50,7 +51,7 @@ private DeprecatedRenameRequest(BuilderImpl builder) { /** * Returns the value of the NewNameNoDeprecation property for this object. - * + * * @return The value of the NewNameNoDeprecation property for this object. */ public final String newNameNoDeprecation() { @@ -59,7 +60,7 @@ public final String newNameNoDeprecation() { /** * Returns the value of the NewName property for this object. - * + * * @return The value of the NewName property for this object. * @deprecated Use {@link #newName()} */ @@ -70,7 +71,7 @@ public final String originalNameDeprecated() { /** * Returns the value of the NewName property for this object. 
- * + * * @return The value of the NewName property for this object. */ public final String newName() { @@ -126,19 +127,19 @@ public final boolean equalsBySdkFields(Object obj) { @Override public final String toString() { return ToString.builder("DeprecatedRenameRequest").add("NewNameNoDeprecation", newNameNoDeprecation()) - .add("NewName", newName()).build(); + .add("NewName", newName()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "NewNameNoDeprecation": - return Optional.ofNullable(clazz.cast(newNameNoDeprecation())); - case "NewName": - return Optional.ofNullable(clazz.cast(newName())); - case "OriginalNameDeprecated": - return Optional.ofNullable(clazz.cast(newName())); - default: - return Optional.empty(); + case "NewNameNoDeprecation": + return Optional.ofNullable(clazz.cast(newNameNoDeprecation())); + case "NewName": + return Optional.ofNullable(clazz.cast(newName())); + case "OriginalNameDeprecated": + return Optional.ofNullable(clazz.cast(newName())); + default: + return Optional.empty(); } } @@ -210,30 +211,19 @@ public final String getNewNameNoDeprecation() { return newNameNoDeprecation; } - @Override - public final Builder newNameNoDeprecation(String newNameNoDeprecation) { - this.newNameNoDeprecation = newNameNoDeprecation; - return this; - } - public final void setNewNameNoDeprecation(String newNameNoDeprecation) { this.newNameNoDeprecation = newNameNoDeprecation; } - public final String getNewName() { - return newName; - } - @Override - public final Builder newName(String newName) { - this.newName = newName; + @Transient + public final Builder newNameNoDeprecation(String newNameNoDeprecation) { + this.newNameNoDeprecation = newNameNoDeprecation; return this; } - @Override - public final Builder originalNameDeprecated(String newName) { - this.newName = newName; - return this; + public final String getNewName() { + return newName; } public final void setNewName(String newName) { @@ 
-248,6 +238,20 @@ public final void setOriginalNameDeprecated(String newName) { this.newName = newName; } + @Override + @Transient + public final Builder newName(String newName) { + this.newName = newName; + return this; + } + + @Override + @Transient + public final Builder originalNameDeprecated(String newName) { + this.newName = newName; + return this; + } + @Override public Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration) { super.overrideConfiguration(overrideConfiguration); @@ -271,4 +275,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenameresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenameresponse.java index 0537255d3477..0bf35f62a16a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenameresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenameresponse.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -21,21 +22,21 @@ */ @Generated("software.amazon.awssdk:codegen") public final class DeprecatedRenameResponse extends JsonProtocolTestsResponse implements - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField ORIGINAL_NAME_NO_DEPRECATION_FIELD = SdkField. 
builder(MarshallingType.STRING) - .memberName("OriginalNameNoDeprecation").getter(getter(DeprecatedRenameResponse::originalNameNoDeprecation)) - .setter(setter(Builder::originalNameNoDeprecation)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("OriginalNameNoDeprecation").build()) - .build(); + .memberName("OriginalNameNoDeprecation").getter(getter(DeprecatedRenameResponse::originalNameNoDeprecation)) + .setter(setter(Builder::originalNameNoDeprecation)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("OriginalNameNoDeprecation").build()) + .build(); private static final SdkField ORIGINAL_NAME_DEPRECATED_FIELD = SdkField. builder(MarshallingType.STRING) - .memberName("OriginalNameDeprecated").getter(getter(DeprecatedRenameResponse::originalNameDeprecated)) - .setter(setter(Builder::originalNameDeprecated)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("OriginalNameDeprecated").build()) - .build(); + .memberName("OriginalNameDeprecated").getter(getter(DeprecatedRenameResponse::originalNameDeprecated)) + .setter(setter(Builder::originalNameDeprecated)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("OriginalNameDeprecated").build()) + .build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList( - ORIGINAL_NAME_NO_DEPRECATION_FIELD, ORIGINAL_NAME_DEPRECATED_FIELD)); + ORIGINAL_NAME_NO_DEPRECATION_FIELD, ORIGINAL_NAME_DEPRECATED_FIELD)); private final String originalNameNoDeprecation; @@ -49,7 +50,7 @@ private DeprecatedRenameResponse(BuilderImpl builder) { /** * Returns the value of the OriginalNameNoDeprecation property for this object. - * + * * @return The value of the OriginalNameNoDeprecation property for this object. 
*/ public final String originalNameNoDeprecation() { @@ -58,7 +59,7 @@ public final String originalNameNoDeprecation() { /** * Returns the value of the OriginalNameDeprecated property for this object. - * + * * @return The value of the OriginalNameDeprecated property for this object. */ public final String originalNameDeprecated() { @@ -105,7 +106,7 @@ public final boolean equalsBySdkFields(Object obj) { } DeprecatedRenameResponse other = (DeprecatedRenameResponse) obj; return Objects.equals(originalNameNoDeprecation(), other.originalNameNoDeprecation()) - && Objects.equals(originalNameDeprecated(), other.originalNameDeprecated()); + && Objects.equals(originalNameDeprecated(), other.originalNameDeprecated()); } /** @@ -115,17 +116,17 @@ public final boolean equalsBySdkFields(Object obj) { @Override public final String toString() { return ToString.builder("DeprecatedRenameResponse").add("OriginalNameNoDeprecation", originalNameNoDeprecation()) - .add("OriginalNameDeprecated", originalNameDeprecated()).build(); + .add("OriginalNameDeprecated", originalNameDeprecated()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "OriginalNameNoDeprecation": - return Optional.ofNullable(clazz.cast(originalNameNoDeprecation())); - case "OriginalNameDeprecated": - return Optional.ofNullable(clazz.cast(originalNameDeprecated())); - default: - return Optional.empty(); + case "OriginalNameNoDeprecation": + return Optional.ofNullable(clazz.cast(originalNameNoDeprecation())); + case "OriginalNameDeprecated": + return Optional.ofNullable(clazz.cast(originalNameDeprecated())); + default: + return Optional.empty(); } } @@ -143,7 +144,7 @@ private static BiConsumer setter(BiConsumer s) { } public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, - CopyableBuilder { + CopyableBuilder { /** * Sets the value of the OriginalNameNoDeprecation property for this object. 
* @@ -181,28 +182,30 @@ public final String getOriginalNameNoDeprecation() { return originalNameNoDeprecation; } - @Override - public final Builder originalNameNoDeprecation(String originalNameNoDeprecation) { + public final void setOriginalNameNoDeprecation(String originalNameNoDeprecation) { this.originalNameNoDeprecation = originalNameNoDeprecation; - return this; } - public final void setOriginalNameNoDeprecation(String originalNameNoDeprecation) { + @Override + @Transient + public final Builder originalNameNoDeprecation(String originalNameNoDeprecation) { this.originalNameNoDeprecation = originalNameNoDeprecation; + return this; } public final String getOriginalNameDeprecated() { return originalNameDeprecated; } - @Override - public final Builder originalNameDeprecated(String originalNameDeprecated) { + public final void setOriginalNameDeprecated(String originalNameDeprecated) { this.originalNameDeprecated = originalNameDeprecated; - return this; } - public final void setOriginalNameDeprecated(String originalNameDeprecated) { + @Override + @Transient + public final Builder originalNameDeprecated(String originalNameDeprecated) { this.originalNameDeprecated = originalNameDeprecated; + return this; } @Override @@ -216,4 +219,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventone.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventone.java index 8cd1e423601e..35c9dc964b90 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventone.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventone.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.util.Arrays; import java.util.Collections; @@ -24,8 +25,8 @@ @Generated("software.amazon.awssdk:codegen") public class EventOne implements SdkPojo, Serializable, 
ToCopyableBuilder, EventStream { private static final SdkField FOO_FIELD = SdkField. builder(MarshallingType.STRING).memberName("Foo") - .getter(getter(EventOne::foo)).setter(setter(Builder::foo)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Foo").build()).build(); + .getter(getter(EventOne::foo)).setter(setter(Builder::foo)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Foo").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(FOO_FIELD)); @@ -39,7 +40,7 @@ protected EventOne(BuilderImpl builder) { /** * Returns the value of the Foo property for this object. - * + * * @return The value of the Foo property for this object. */ public final String foo() { @@ -97,10 +98,10 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "Foo": - return Optional.ofNullable(clazz.cast(foo())); - default: - return Optional.empty(); + case "Foo": + return Optional.ofNullable(clazz.cast(foo())); + default: + return Optional.empty(); } } @@ -158,14 +159,15 @@ public final String getFoo() { return foo; } - @Override - public final Builder foo(String foo) { + public final void setFoo(String foo) { this.foo = foo; - return this; } - public final void setFoo(String foo) { + @Override + @Transient + public final Builder foo(String foo) { this.foo = foo; + return this; } @Override @@ -179,4 +181,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventtwo.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventtwo.java index 25b45d66c08b..2640edb77159 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventtwo.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventtwo.java @@ -1,5 +1,6 @@ package 
software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.util.Arrays; import java.util.Collections; @@ -24,8 +25,8 @@ @Generated("software.amazon.awssdk:codegen") public class EventTwo implements SdkPojo, Serializable, ToCopyableBuilder, EventStream { private static final SdkField BAR_FIELD = SdkField. builder(MarshallingType.STRING).memberName("Bar") - .getter(getter(EventTwo::bar)).setter(setter(Builder::bar)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Bar").build()).build(); + .getter(getter(EventTwo::bar)).setter(setter(Builder::bar)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Bar").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BAR_FIELD)); @@ -39,7 +40,7 @@ protected EventTwo(BuilderImpl builder) { /** * Returns the value of the Bar property for this object. - * + * * @return The value of the Bar property for this object. 
*/ public final String bar() { @@ -97,10 +98,10 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "Bar": - return Optional.ofNullable(clazz.cast(bar())); - default: - return Optional.empty(); + case "Bar": + return Optional.ofNullable(clazz.cast(bar())); + default: + return Optional.empty(); } } @@ -158,14 +159,15 @@ public final String getBar() { return bar; } - @Override - public final Builder bar(String bar) { + public final void setBar(String bar) { this.bar = bar; - return this; } - public final void setBar(String bar) { + @Override + @Transient + public final Builder bar(String bar) { this.bar = bar; + return this; } @Override @@ -179,4 +181,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java index 54c651084b78..f37fe9a0a062 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -31,65 +32,65 @@ */ @Generated("software.amazon.awssdk:codegen") public final class ExistenceCheckNamingRequest extends JsonProtocolTestsRequest implements - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField> BUILD_FIELD = SdkField - .> builder(MarshallingType.LIST) - .memberName("Build") - .getter(getter(ExistenceCheckNamingRequest::build)) - .setter(setter(Builder::build)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Build").build(), - ListTrait - .builder() - 
.memberLocationName(null) - .memberFieldInfo( - SdkField. builder(MarshallingType.STRING) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("member").build()).build()).build()).build(); + .> builder(MarshallingType.LIST) + .memberName("Build") + .getter(getter(ExistenceCheckNamingRequest::build)) + .setter(setter(Builder::build)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Build").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. builder(MarshallingType.STRING) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("member").build()).build()).build()).build(); private static final SdkField> SUPER_FIELD = SdkField - .> builder(MarshallingType.LIST) - .memberName("super") - .getter(getter(ExistenceCheckNamingRequest::superValue)) - .setter(setter(Builder::superValue)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("super").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. builder(MarshallingType.STRING) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("member").build()).build()).build()).build(); + .> builder(MarshallingType.LIST) + .memberName("super") + .getter(getter(ExistenceCheckNamingRequest::superValue)) + .setter(setter(Builder::superValue)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("super").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. 
builder(MarshallingType.STRING) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("member").build()).build()).build()).build(); private static final SdkField> TO_STRING_FIELD = SdkField - .> builder(MarshallingType.MAP) - .memberName("toString") - .getter(getter(ExistenceCheckNamingRequest::toStringValue)) - .setter(setter(Builder::toStringValue)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("toString").build(), - MapTrait.builder() - .keyLocationName("key") - .valueLocationName("value") - .valueFieldInfo( - SdkField. builder(MarshallingType.STRING) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("value").build()).build()).build()).build(); + .> builder(MarshallingType.MAP) + .memberName("toString") + .getter(getter(ExistenceCheckNamingRequest::toStringValue)) + .setter(setter(Builder::toStringValue)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("toString").build(), + MapTrait.builder() + .keyLocationName("key") + .valueLocationName("value") + .valueFieldInfo( + SdkField. builder(MarshallingType.STRING) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("value").build()).build()).build()).build(); private static final SdkField> EQUALS_FIELD = SdkField - .> builder(MarshallingType.MAP) - .memberName("equals") - .getter(getter(ExistenceCheckNamingRequest::equalsValue)) - .setter(setter(Builder::equalsValue)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("equals").build(), - MapTrait.builder() - .keyLocationName("key") - .valueLocationName("value") - .valueFieldInfo( - SdkField. 
builder(MarshallingType.STRING) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("value").build()).build()).build()).build(); + .> builder(MarshallingType.MAP) + .memberName("equals") + .getter(getter(ExistenceCheckNamingRequest::equalsValue)) + .setter(setter(Builder::equalsValue)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("equals").build(), + MapTrait.builder() + .keyLocationName("key") + .valueLocationName("value") + .valueFieldInfo( + SdkField. builder(MarshallingType.STRING) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("value").build()).build()).build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BUILD_FIELD, SUPER_FIELD, - TO_STRING_FIELD, EQUALS_FIELD)); + TO_STRING_FIELD, EQUALS_FIELD)); private final List build; @@ -123,7 +124,7 @@ public final boolean hasBuild() { *

* You can use {@link #hasBuild()} to see if a value was sent in this field. *

- * + * * @return The value of the Build property for this object. */ public final List build() { @@ -146,7 +147,7 @@ public final boolean hasSuperValue() { *

* You can use {@link #hasSuperValue()} to see if a value was sent in this field. *

- * + * * @return The value of the Super property for this object. */ public final List superValue() { @@ -169,7 +170,7 @@ public final boolean hasToStringValue() { *

* You can use {@link #hasToStringValue()} to see if a value was sent in this field. *

- * + * * @return The value of the ToString property for this object. */ public final Map toStringValue() { @@ -192,7 +193,7 @@ public final boolean hasEqualsValue() { *

* You can use {@link #hasEqualsValue()} to see if a value was sent in this field. *

- * + * * @return The value of the Equals property for this object. */ public final Map equalsValue() { @@ -241,9 +242,9 @@ public final boolean equalsBySdkFields(Object obj) { } ExistenceCheckNamingRequest other = (ExistenceCheckNamingRequest) obj; return hasBuild() == other.hasBuild() && Objects.equals(build(), other.build()) - && hasSuperValue() == other.hasSuperValue() && Objects.equals(superValue(), other.superValue()) - && hasToStringValue() == other.hasToStringValue() && Objects.equals(toStringValue(), other.toStringValue()) - && hasEqualsValue() == other.hasEqualsValue() && Objects.equals(equalsValue(), other.equalsValue()); + && hasSuperValue() == other.hasSuperValue() && Objects.equals(superValue(), other.superValue()) + && hasToStringValue() == other.hasToStringValue() && Objects.equals(toStringValue(), other.toStringValue()) + && hasEqualsValue() == other.hasEqualsValue() && Objects.equals(equalsValue(), other.equalsValue()); } /** @@ -253,22 +254,22 @@ && hasToStringValue() == other.hasToStringValue() && Objects.equals(toStringValu @Override public final String toString() { return ToString.builder("ExistenceCheckNamingRequest").add("Build", hasBuild() ? build() : null) - .add("Super", hasSuperValue() ? superValue() : null).add("ToString", hasToStringValue() ? toStringValue() : null) - .add("Equals", hasEqualsValue() ? equalsValue() : null).build(); + .add("Super", hasSuperValue() ? superValue() : null).add("ToString", hasToStringValue() ? toStringValue() : null) + .add("Equals", hasEqualsValue() ? 
equalsValue() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "Build": - return Optional.ofNullable(clazz.cast(build())); - case "super": - return Optional.ofNullable(clazz.cast(superValue())); - case "toString": - return Optional.ofNullable(clazz.cast(toStringValue())); - case "equals": - return Optional.ofNullable(clazz.cast(equalsValue())); - default: - return Optional.empty(); + case "Build": + return Optional.ofNullable(clazz.cast(build())); + case "super": + return Optional.ofNullable(clazz.cast(superValue())); + case "toString": + return Optional.ofNullable(clazz.cast(toStringValue())); + case "equals": + return Optional.ofNullable(clazz.cast(equalsValue())); + default: + return Optional.empty(); } } @@ -286,7 +287,7 @@ private static BiConsumer setter(BiConsumer s) { } public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, - CopyableBuilder { + CopyableBuilder { /** * Sets the value of the Build property for this object. * @@ -375,23 +376,25 @@ public final Collection getBuild() { return build; } + public final void setBuild(Collection build) { + this.build = ListOfStringsCopier.copy(build); + } + @Override + @Transient public final Builder build(Collection build) { this.build = ListOfStringsCopier.copy(build); return this; } @Override + @Transient @SafeVarargs public final Builder build(String... 
build) { build(Arrays.asList(build)); return this; } - public final void setBuild(Collection build) { - this.build = ListOfStringsCopier.copy(build); - } - public final Collection getSuperValue() { if (superValue instanceof SdkAutoConstructList) { return null; @@ -399,23 +402,25 @@ public final Collection getSuperValue() { return superValue; } + public final void setSuperValue(Collection superValue) { + this.superValue = ListOfStringsCopier.copy(superValue); + } + @Override + @Transient public final Builder superValue(Collection superValue) { this.superValue = ListOfStringsCopier.copy(superValue); return this; } @Override + @Transient @SafeVarargs public final Builder superValue(String... superValue) { superValue(Arrays.asList(superValue)); return this; } - public final void setSuperValue(Collection superValue) { - this.superValue = ListOfStringsCopier.copy(superValue); - } - public final Map getToStringValue() { if (toStringValue instanceof SdkAutoConstructMap) { return null; @@ -423,14 +428,15 @@ public final Map getToStringValue() { return toStringValue; } - @Override - public final Builder toStringValue(Map toStringValue) { + public final void setToStringValue(Map toStringValue) { this.toStringValue = MapOfStringToStringCopier.copy(toStringValue); - return this; } - public final void setToStringValue(Map toStringValue) { + @Override + @Transient + public final Builder toStringValue(Map toStringValue) { this.toStringValue = MapOfStringToStringCopier.copy(toStringValue); + return this; } public final Map getEqualsValue() { @@ -440,14 +446,15 @@ public final Map getEqualsValue() { return equalsValue; } - @Override - public final Builder equalsValue(Map equalsValue) { + public final void setEqualsValue(Map equalsValue) { this.equalsValue = MapOfStringToStringCopier.copy(equalsValue); - return this; } - public final void setEqualsValue(Map equalsValue) { + @Override + @Transient + public final Builder equalsValue(Map equalsValue) { this.equalsValue = 
MapOfStringToStringCopier.copy(equalsValue); + return this; } @Override @@ -473,4 +480,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java index 7432ab65d384..42154d91c8d9 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -29,65 +30,65 @@ */ @Generated("software.amazon.awssdk:codegen") public final class ExistenceCheckNamingResponse extends JsonProtocolTestsResponse implements - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField> BUILD_FIELD = SdkField - .> builder(MarshallingType.LIST) - .memberName("Build") - .getter(getter(ExistenceCheckNamingResponse::build)) - .setter(setter(Builder::build)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Build").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. builder(MarshallingType.STRING) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("member").build()).build()).build()).build(); + .> builder(MarshallingType.LIST) + .memberName("Build") + .getter(getter(ExistenceCheckNamingResponse::build)) + .setter(setter(Builder::build)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Build").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. 
builder(MarshallingType.STRING) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("member").build()).build()).build()).build(); private static final SdkField> SUPER_FIELD = SdkField - .> builder(MarshallingType.LIST) - .memberName("super") - .getter(getter(ExistenceCheckNamingResponse::superValue)) - .setter(setter(Builder::superValue)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("super").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. builder(MarshallingType.STRING) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("member").build()).build()).build()).build(); + .> builder(MarshallingType.LIST) + .memberName("super") + .getter(getter(ExistenceCheckNamingResponse::superValue)) + .setter(setter(Builder::superValue)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("super").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. builder(MarshallingType.STRING) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("member").build()).build()).build()).build(); private static final SdkField> TO_STRING_FIELD = SdkField - .> builder(MarshallingType.MAP) - .memberName("toString") - .getter(getter(ExistenceCheckNamingResponse::toStringValue)) - .setter(setter(Builder::toStringValue)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("toString").build(), - MapTrait.builder() - .keyLocationName("key") - .valueLocationName("value") - .valueFieldInfo( - SdkField. 
builder(MarshallingType.STRING) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("value").build()).build()).build()).build(); + .> builder(MarshallingType.MAP) + .memberName("toString") + .getter(getter(ExistenceCheckNamingResponse::toStringValue)) + .setter(setter(Builder::toStringValue)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("toString").build(), + MapTrait.builder() + .keyLocationName("key") + .valueLocationName("value") + .valueFieldInfo( + SdkField. builder(MarshallingType.STRING) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("value").build()).build()).build()).build(); private static final SdkField> EQUALS_FIELD = SdkField - .> builder(MarshallingType.MAP) - .memberName("equals") - .getter(getter(ExistenceCheckNamingResponse::equalsValue)) - .setter(setter(Builder::equalsValue)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("equals").build(), - MapTrait.builder() - .keyLocationName("key") - .valueLocationName("value") - .valueFieldInfo( - SdkField. builder(MarshallingType.STRING) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("value").build()).build()).build()).build(); + .> builder(MarshallingType.MAP) + .memberName("equals") + .getter(getter(ExistenceCheckNamingResponse::equalsValue)) + .setter(setter(Builder::equalsValue)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("equals").build(), + MapTrait.builder() + .keyLocationName("key") + .valueLocationName("value") + .valueFieldInfo( + SdkField. 
builder(MarshallingType.STRING) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("value").build()).build()).build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BUILD_FIELD, SUPER_FIELD, - TO_STRING_FIELD, EQUALS_FIELD)); + TO_STRING_FIELD, EQUALS_FIELD)); private final List build; @@ -121,7 +122,7 @@ public final boolean hasBuild() { *

* You can use {@link #hasBuild()} to see if a value was sent in this field. *

- * + * * @return The value of the Build property for this object. */ public final List build() { @@ -144,7 +145,7 @@ public final boolean hasSuperValue() { *

* You can use {@link #hasSuperValue()} to see if a value was sent in this field. *

- * + * * @return The value of the Super property for this object. */ public final List superValue() { @@ -167,7 +168,7 @@ public final boolean hasToStringValue() { *

* You can use {@link #hasToStringValue()} to see if a value was sent in this field. *

- * + * * @return The value of the ToString property for this object. */ public final Map toStringValue() { @@ -190,7 +191,7 @@ public final boolean hasEqualsValue() { *

* You can use {@link #hasEqualsValue()} to see if a value was sent in this field. *

- * + * * @return The value of the Equals property for this object. */ public final Map equalsValue() { @@ -239,9 +240,9 @@ public final boolean equalsBySdkFields(Object obj) { } ExistenceCheckNamingResponse other = (ExistenceCheckNamingResponse) obj; return hasBuild() == other.hasBuild() && Objects.equals(build(), other.build()) - && hasSuperValue() == other.hasSuperValue() && Objects.equals(superValue(), other.superValue()) - && hasToStringValue() == other.hasToStringValue() && Objects.equals(toStringValue(), other.toStringValue()) - && hasEqualsValue() == other.hasEqualsValue() && Objects.equals(equalsValue(), other.equalsValue()); + && hasSuperValue() == other.hasSuperValue() && Objects.equals(superValue(), other.superValue()) + && hasToStringValue() == other.hasToStringValue() && Objects.equals(toStringValue(), other.toStringValue()) + && hasEqualsValue() == other.hasEqualsValue() && Objects.equals(equalsValue(), other.equalsValue()); } /** @@ -251,22 +252,22 @@ && hasToStringValue() == other.hasToStringValue() && Objects.equals(toStringValu @Override public final String toString() { return ToString.builder("ExistenceCheckNamingResponse").add("Build", hasBuild() ? build() : null) - .add("Super", hasSuperValue() ? superValue() : null).add("ToString", hasToStringValue() ? toStringValue() : null) - .add("Equals", hasEqualsValue() ? equalsValue() : null).build(); + .add("Super", hasSuperValue() ? superValue() : null).add("ToString", hasToStringValue() ? toStringValue() : null) + .add("Equals", hasEqualsValue() ? 
equalsValue() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "Build": - return Optional.ofNullable(clazz.cast(build())); - case "super": - return Optional.ofNullable(clazz.cast(superValue())); - case "toString": - return Optional.ofNullable(clazz.cast(toStringValue())); - case "equals": - return Optional.ofNullable(clazz.cast(equalsValue())); - default: - return Optional.empty(); + case "Build": + return Optional.ofNullable(clazz.cast(build())); + case "super": + return Optional.ofNullable(clazz.cast(superValue())); + case "toString": + return Optional.ofNullable(clazz.cast(toStringValue())); + case "equals": + return Optional.ofNullable(clazz.cast(equalsValue())); + default: + return Optional.empty(); } } @@ -284,7 +285,7 @@ private static BiConsumer setter(BiConsumer s) { } public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, - CopyableBuilder { + CopyableBuilder { /** * Sets the value of the Build property for this object. * @@ -367,23 +368,25 @@ public final Collection getBuild() { return build; } + public final void setBuild(Collection build) { + this.build = ListOfStringsCopier.copy(build); + } + @Override + @Transient public final Builder build(Collection build) { this.build = ListOfStringsCopier.copy(build); return this; } @Override + @Transient @SafeVarargs public final Builder build(String... 
build) { build(Arrays.asList(build)); return this; } - public final void setBuild(Collection build) { - this.build = ListOfStringsCopier.copy(build); - } - public final Collection getSuperValue() { if (superValue instanceof SdkAutoConstructList) { return null; @@ -391,23 +394,25 @@ public final Collection getSuperValue() { return superValue; } + public final void setSuperValue(Collection superValue) { + this.superValue = ListOfStringsCopier.copy(superValue); + } + @Override + @Transient public final Builder superValue(Collection superValue) { this.superValue = ListOfStringsCopier.copy(superValue); return this; } @Override + @Transient @SafeVarargs public final Builder superValue(String... superValue) { superValue(Arrays.asList(superValue)); return this; } - public final void setSuperValue(Collection superValue) { - this.superValue = ListOfStringsCopier.copy(superValue); - } - public final Map getToStringValue() { if (toStringValue instanceof SdkAutoConstructMap) { return null; @@ -415,14 +420,15 @@ public final Map getToStringValue() { return toStringValue; } - @Override - public final Builder toStringValue(Map toStringValue) { + public final void setToStringValue(Map toStringValue) { this.toStringValue = MapOfStringToStringCopier.copy(toStringValue); - return this; } - public final void setToStringValue(Map toStringValue) { + @Override + @Transient + public final Builder toStringValue(Map toStringValue) { this.toStringValue = MapOfStringToStringCopier.copy(toStringValue); + return this; } public final Map getEqualsValue() { @@ -432,14 +438,15 @@ public final Map getEqualsValue() { return equalsValue; } - @Override - public final Builder equalsValue(Map equalsValue) { + public final void setEqualsValue(Map equalsValue) { this.equalsValue = MapOfStringToStringCopier.copy(equalsValue); - return this; } - public final void setEqualsValue(Map equalsValue) { + @Override + @Transient + public final Builder equalsValue(Map equalsValue) { this.equalsValue = 
MapOfStringToStringCopier.copy(equalsValue); + return this; } @Override @@ -453,4 +460,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputevent.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputevent.java index a1dee9f6070a..20c10aa3a12b 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputevent.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputevent.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Arrays; @@ -155,16 +156,17 @@ public final ByteBuffer getExplicitPayloadMember() { return explicitPayloadMember == null ? null : explicitPayloadMember.asByteBuffer(); } + public final void setExplicitPayloadMember(ByteBuffer explicitPayloadMember) { + explicitPayloadMember(explicitPayloadMember == null ? null : SdkBytes.fromByteBuffer(explicitPayloadMember)); + } + @Override + @Transient public final Builder explicitPayloadMember(SdkBytes explicitPayloadMember) { this.explicitPayloadMember = explicitPayloadMember; return this; } - public final void setExplicitPayloadMember(ByteBuffer explicitPayloadMember) { - explicitPayloadMember(explicitPayloadMember == null ? 
null : SdkBytes.fromByteBuffer(explicitPayloadMember)); - } - @Override public InputEvent build() { return new InputEvent(this); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventtwo.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventtwo.java index 9976d1457f24..4a7da0677171 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventtwo.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventtwo.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Arrays; @@ -222,42 +223,45 @@ public final ByteBuffer getImplicitPayloadMemberOne() { return implicitPayloadMemberOne == null ? null : implicitPayloadMemberOne.asByteBuffer(); } + public final void setImplicitPayloadMemberOne(ByteBuffer implicitPayloadMemberOne) { + implicitPayloadMemberOne(implicitPayloadMemberOne == null ? null : SdkBytes.fromByteBuffer(implicitPayloadMemberOne)); + } + @Override + @Transient public final Builder implicitPayloadMemberOne(SdkBytes implicitPayloadMemberOne) { this.implicitPayloadMemberOne = implicitPayloadMemberOne; return this; } - public final void setImplicitPayloadMemberOne(ByteBuffer implicitPayloadMemberOne) { - implicitPayloadMemberOne(implicitPayloadMemberOne == null ? 
null : SdkBytes.fromByteBuffer(implicitPayloadMemberOne)); - } - public final String getImplicitPayloadMemberTwo() { return implicitPayloadMemberTwo; } - @Override - public final Builder implicitPayloadMemberTwo(String implicitPayloadMemberTwo) { + public final void setImplicitPayloadMemberTwo(String implicitPayloadMemberTwo) { this.implicitPayloadMemberTwo = implicitPayloadMemberTwo; - return this; } - public final void setImplicitPayloadMemberTwo(String implicitPayloadMemberTwo) { + @Override + @Transient + public final Builder implicitPayloadMemberTwo(String implicitPayloadMemberTwo) { this.implicitPayloadMemberTwo = implicitPayloadMemberTwo; + return this; } public final String getEventHeaderMember() { return eventHeaderMember; } - @Override - public final Builder eventHeaderMember(String eventHeaderMember) { + public final void setEventHeaderMember(String eventHeaderMember) { this.eventHeaderMember = eventHeaderMember; - return this; } - public final void setEventHeaderMember(String eventHeaderMember) { + @Override + @Transient + public final Builder eventHeaderMember(String eventHeaderMember) { this.eventHeaderMember = eventHeaderMember; + return this; } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java index 7b68fc584031..bbe32e25d168 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -31,107 +32,107 @@ */ @Generated("software.amazon.awssdk:codegen") public final class NestedContainersRequest extends JsonProtocolTestsRequest implements 
- ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField>> LIST_OF_LIST_OF_STRINGS_FIELD = SdkField - .>> builder(MarshallingType.LIST) - .memberName("ListOfListOfStrings") - .getter(getter(NestedContainersRequest::listOfListOfStrings)) - .setter(setter(Builder::listOfListOfStrings)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ListOfListOfStrings").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField.> builder(MarshallingType.LIST) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("member").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. builder(MarshallingType.STRING) - .traits(LocationTrait.builder() - .location(MarshallLocation.PAYLOAD) - .locationName("member").build()).build()) - .build()).build()).build()).build(); + .>> builder(MarshallingType.LIST) + .memberName("ListOfListOfStrings") + .getter(getter(NestedContainersRequest::listOfListOfStrings)) + .setter(setter(Builder::listOfListOfStrings)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ListOfListOfStrings").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField.> builder(MarshallingType.LIST) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("member").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. 
builder(MarshallingType.STRING) + .traits(LocationTrait.builder() + .location(MarshallLocation.PAYLOAD) + .locationName("member").build()).build()) + .build()).build()).build()).build(); private static final SdkField>>> LIST_OF_LIST_OF_LIST_OF_STRINGS_FIELD = SdkField - .>>> builder(MarshallingType.LIST) - .memberName("ListOfListOfListOfStrings") - .getter(getter(NestedContainersRequest::listOfListOfListOfStrings)) - .setter(setter(Builder::listOfListOfListOfStrings)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ListOfListOfListOfStrings").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField.>> builder(MarshallingType.LIST) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("member").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField.> builder(MarshallingType.LIST) - .traits(LocationTrait.builder() - .location(MarshallLocation.PAYLOAD) - .locationName("member").build(), - ListTrait + .>>> builder(MarshallingType.LIST) + .memberName("ListOfListOfListOfStrings") + .getter(getter(NestedContainersRequest::listOfListOfListOfStrings)) + .setter(setter(Builder::listOfListOfListOfStrings)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ListOfListOfListOfStrings").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField.>> builder(MarshallingType.LIST) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("member").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField.> builder(MarshallingType.LIST) + .traits(LocationTrait.builder() + .location(MarshallLocation.PAYLOAD) + .locationName("member").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. 
builder( + MarshallingType.STRING) + .traits(LocationTrait .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. builder( - MarshallingType.STRING) - .traits(LocationTrait - .builder() - .location( - MarshallLocation.PAYLOAD) - .locationName( - "member") - .build()) - .build()).build()) - .build()).build()).build()).build()).build(); + .location( + MarshallLocation.PAYLOAD) + .locationName( + "member") + .build()) + .build()).build()) + .build()).build()).build()).build()).build(); private static final SdkField>>> MAP_OF_STRING_TO_LIST_OF_LIST_OF_STRINGS_FIELD = SdkField - .>>> builder(MarshallingType.MAP) - .memberName("MapOfStringToListOfListOfStrings") - .getter(getter(NestedContainersRequest::mapOfStringToListOfListOfStrings)) - .setter(setter(Builder::mapOfStringToListOfListOfStrings)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MapOfStringToListOfListOfStrings") - .build(), - MapTrait.builder() - .keyLocationName("key") - .valueLocationName("value") - .valueFieldInfo( - SdkField.>> builder(MarshallingType.LIST) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("value").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField.> builder(MarshallingType.LIST) - .traits(LocationTrait.builder() - .location(MarshallLocation.PAYLOAD) - .locationName("member").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. 
builder( - MarshallingType.STRING) - .traits(LocationTrait - .builder() - .location( - MarshallLocation.PAYLOAD) - .locationName( - "member") - .build()) - .build()).build()) - .build()).build()).build()).build()).build(); + .>>> builder(MarshallingType.MAP) + .memberName("MapOfStringToListOfListOfStrings") + .getter(getter(NestedContainersRequest::mapOfStringToListOfListOfStrings)) + .setter(setter(Builder::mapOfStringToListOfListOfStrings)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MapOfStringToListOfListOfStrings") + .build(), + MapTrait.builder() + .keyLocationName("key") + .valueLocationName("value") + .valueFieldInfo( + SdkField.>> builder(MarshallingType.LIST) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("value").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField.> builder(MarshallingType.LIST) + .traits(LocationTrait.builder() + .location(MarshallLocation.PAYLOAD) + .locationName("member").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. builder( + MarshallingType.STRING) + .traits(LocationTrait + .builder() + .location( + MarshallLocation.PAYLOAD) + .locationName( + "member") + .build()) + .build()).build()) + .build()).build()).build()).build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(LIST_OF_LIST_OF_STRINGS_FIELD, - LIST_OF_LIST_OF_LIST_OF_STRINGS_FIELD, MAP_OF_STRING_TO_LIST_OF_LIST_OF_STRINGS_FIELD)); + LIST_OF_LIST_OF_LIST_OF_STRINGS_FIELD, MAP_OF_STRING_TO_LIST_OF_LIST_OF_STRINGS_FIELD)); private final List> listOfListOfStrings; @@ -163,7 +164,7 @@ public final boolean hasListOfListOfStrings() { *

* You can use {@link #hasListOfListOfStrings()} to see if a value was sent in this field. *

- * + * * @return The value of the ListOfListOfStrings property for this object. */ public final List> listOfListOfStrings() { @@ -187,7 +188,7 @@ public final boolean hasListOfListOfListOfStrings() { *

* You can use {@link #hasListOfListOfListOfStrings()} to see if a value was sent in this field. *

- * + * * @return The value of the ListOfListOfListOfStrings property for this object. */ public final List>> listOfListOfListOfStrings() { @@ -211,7 +212,7 @@ public final boolean hasMapOfStringToListOfListOfStrings() { *

* You can use {@link #hasMapOfStringToListOfListOfStrings()} to see if a value was sent in this field. *

- * + * * @return The value of the MapOfStringToListOfListOfStrings property for this object. */ public final Map>> mapOfStringToListOfListOfStrings() { @@ -238,7 +239,7 @@ public final int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(hasListOfListOfStrings() ? listOfListOfStrings() : null); hashCode = 31 * hashCode + Objects.hashCode(hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null); hashCode = 31 * hashCode - + Objects.hashCode(hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null); + + Objects.hashCode(hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null); return hashCode; } @@ -260,11 +261,11 @@ public final boolean equalsBySdkFields(Object obj) { } NestedContainersRequest other = (NestedContainersRequest) obj; return hasListOfListOfStrings() == other.hasListOfListOfStrings() - && Objects.equals(listOfListOfStrings(), other.listOfListOfStrings()) - && hasListOfListOfListOfStrings() == other.hasListOfListOfListOfStrings() - && Objects.equals(listOfListOfListOfStrings(), other.listOfListOfListOfStrings()) - && hasMapOfStringToListOfListOfStrings() == other.hasMapOfStringToListOfListOfStrings() - && Objects.equals(mapOfStringToListOfListOfStrings(), other.mapOfStringToListOfListOfStrings()); + && Objects.equals(listOfListOfStrings(), other.listOfListOfStrings()) + && hasListOfListOfListOfStrings() == other.hasListOfListOfListOfStrings() + && Objects.equals(listOfListOfListOfStrings(), other.listOfListOfListOfStrings()) + && hasMapOfStringToListOfListOfStrings() == other.hasMapOfStringToListOfListOfStrings() + && Objects.equals(mapOfStringToListOfListOfStrings(), other.mapOfStringToListOfListOfStrings()); } /** @@ -274,23 +275,23 @@ && hasMapOfStringToListOfListOfStrings() == other.hasMapOfStringToListOfListOfSt @Override public final String toString() { return ToString - .builder("NestedContainersRequest") - .add("ListOfListOfStrings", hasListOfListOfStrings() ? 
listOfListOfStrings() : null) - .add("ListOfListOfListOfStrings", hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null) - .add("MapOfStringToListOfListOfStrings", - hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null).build(); + .builder("NestedContainersRequest") + .add("ListOfListOfStrings", hasListOfListOfStrings() ? listOfListOfStrings() : null) + .add("ListOfListOfListOfStrings", hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null) + .add("MapOfStringToListOfListOfStrings", + hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "ListOfListOfStrings": - return Optional.ofNullable(clazz.cast(listOfListOfStrings())); - case "ListOfListOfListOfStrings": - return Optional.ofNullable(clazz.cast(listOfListOfListOfStrings())); - case "MapOfStringToListOfListOfStrings": - return Optional.ofNullable(clazz.cast(mapOfStringToListOfListOfStrings())); - default: - return Optional.empty(); + case "ListOfListOfStrings": + return Optional.ofNullable(clazz.cast(listOfListOfStrings())); + case "ListOfListOfListOfStrings": + return Optional.ofNullable(clazz.cast(listOfListOfListOfStrings())); + case "MapOfStringToListOfListOfStrings": + return Optional.ofNullable(clazz.cast(mapOfStringToListOfListOfStrings())); + default: + return Optional.empty(); } } @@ -352,7 +353,7 @@ public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, Copy * @return Returns a reference to this object so that method calls can be chained together. 
*/ Builder mapOfStringToListOfListOfStrings( - Map>> mapOfStringToListOfListOfStrings); + Map>> mapOfStringToListOfListOfStrings); @Override Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); @@ -385,23 +386,25 @@ public final Collection> getListOfListOfStrings() { return listOfListOfStrings; } + public final void setListOfListOfStrings(Collection> listOfListOfStrings) { + this.listOfListOfStrings = ListOfListOfStringsCopier.copy(listOfListOfStrings); + } + @Override + @Transient public final Builder listOfListOfStrings(Collection> listOfListOfStrings) { this.listOfListOfStrings = ListOfListOfStringsCopier.copy(listOfListOfStrings); return this; } @Override + @Transient @SafeVarargs public final Builder listOfListOfStrings(Collection... listOfListOfStrings) { listOfListOfStrings(Arrays.asList(listOfListOfStrings)); return this; } - public final void setListOfListOfStrings(Collection> listOfListOfStrings) { - this.listOfListOfStrings = ListOfListOfStringsCopier.copy(listOfListOfStrings); - } - public final Collection>> getListOfListOfListOfStrings() { if (listOfListOfListOfStrings instanceof SdkAutoConstructList) { return null; @@ -409,25 +412,27 @@ public final Collection>> getL return listOfListOfListOfStrings; } + public final void setListOfListOfListOfStrings( + Collection>> listOfListOfListOfStrings) { + this.listOfListOfListOfStrings = ListOfListOfListOfStringsCopier.copy(listOfListOfListOfStrings); + } + @Override + @Transient public final Builder listOfListOfListOfStrings( - Collection>> listOfListOfListOfStrings) { + Collection>> listOfListOfListOfStrings) { this.listOfListOfListOfStrings = ListOfListOfListOfStringsCopier.copy(listOfListOfListOfStrings); return this; } @Override + @Transient @SafeVarargs public final Builder listOfListOfListOfStrings(Collection>... 
listOfListOfListOfStrings) { listOfListOfListOfStrings(Arrays.asList(listOfListOfListOfStrings)); return this; } - public final void setListOfListOfListOfStrings( - Collection>> listOfListOfListOfStrings) { - this.listOfListOfListOfStrings = ListOfListOfListOfStringsCopier.copy(listOfListOfListOfStrings); - } - public final Map>> getMapOfStringToListOfListOfStrings() { if (mapOfStringToListOfListOfStrings instanceof SdkAutoConstructMap) { return null; @@ -435,16 +440,17 @@ public final void setListOfListOfListOfStrings( return mapOfStringToListOfListOfStrings; } - @Override - public final Builder mapOfStringToListOfListOfStrings( - Map>> mapOfStringToListOfListOfStrings) { + public final void setMapOfStringToListOfListOfStrings( + Map>> mapOfStringToListOfListOfStrings) { this.mapOfStringToListOfListOfStrings = MapOfStringToListOfListOfStringsCopier.copy(mapOfStringToListOfListOfStrings); - return this; } - public final void setMapOfStringToListOfListOfStrings( - Map>> mapOfStringToListOfListOfStrings) { + @Override + @Transient + public final Builder mapOfStringToListOfListOfStrings( + Map>> mapOfStringToListOfListOfStrings) { this.mapOfStringToListOfListOfStrings = MapOfStringToListOfListOfStringsCopier.copy(mapOfStringToListOfListOfStrings); + return this; } @Override @@ -470,4 +476,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java index 6ff648142973..f61d2f29d6c9 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.util.Arrays; import java.util.Collection; import 
java.util.Collections; @@ -29,107 +30,107 @@ */ @Generated("software.amazon.awssdk:codegen") public final class NestedContainersResponse extends JsonProtocolTestsResponse implements - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField>> LIST_OF_LIST_OF_STRINGS_FIELD = SdkField - .>> builder(MarshallingType.LIST) - .memberName("ListOfListOfStrings") - .getter(getter(NestedContainersResponse::listOfListOfStrings)) - .setter(setter(Builder::listOfListOfStrings)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ListOfListOfStrings").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField.> builder(MarshallingType.LIST) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("member").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. builder(MarshallingType.STRING) - .traits(LocationTrait.builder() - .location(MarshallLocation.PAYLOAD) - .locationName("member").build()).build()) - .build()).build()).build()).build(); + .>> builder(MarshallingType.LIST) + .memberName("ListOfListOfStrings") + .getter(getter(NestedContainersResponse::listOfListOfStrings)) + .setter(setter(Builder::listOfListOfStrings)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ListOfListOfStrings").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField.> builder(MarshallingType.LIST) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("member").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. 
builder(MarshallingType.STRING) + .traits(LocationTrait.builder() + .location(MarshallLocation.PAYLOAD) + .locationName("member").build()).build()) + .build()).build()).build()).build(); private static final SdkField>>> LIST_OF_LIST_OF_LIST_OF_STRINGS_FIELD = SdkField - .>>> builder(MarshallingType.LIST) - .memberName("ListOfListOfListOfStrings") - .getter(getter(NestedContainersResponse::listOfListOfListOfStrings)) - .setter(setter(Builder::listOfListOfListOfStrings)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ListOfListOfListOfStrings").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField.>> builder(MarshallingType.LIST) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("member").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField.> builder(MarshallingType.LIST) - .traits(LocationTrait.builder() - .location(MarshallLocation.PAYLOAD) - .locationName("member").build(), - ListTrait + .>>> builder(MarshallingType.LIST) + .memberName("ListOfListOfListOfStrings") + .getter(getter(NestedContainersResponse::listOfListOfListOfStrings)) + .setter(setter(Builder::listOfListOfListOfStrings)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ListOfListOfListOfStrings").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField.>> builder(MarshallingType.LIST) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("member").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField.> builder(MarshallingType.LIST) + .traits(LocationTrait.builder() + .location(MarshallLocation.PAYLOAD) + .locationName("member").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. 
builder( + MarshallingType.STRING) + .traits(LocationTrait .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. builder( - MarshallingType.STRING) - .traits(LocationTrait - .builder() - .location( - MarshallLocation.PAYLOAD) - .locationName( - "member") - .build()) - .build()).build()) - .build()).build()).build()).build()).build(); + .location( + MarshallLocation.PAYLOAD) + .locationName( + "member") + .build()) + .build()).build()) + .build()).build()).build()).build()).build(); private static final SdkField>>> MAP_OF_STRING_TO_LIST_OF_LIST_OF_STRINGS_FIELD = SdkField - .>>> builder(MarshallingType.MAP) - .memberName("MapOfStringToListOfListOfStrings") - .getter(getter(NestedContainersResponse::mapOfStringToListOfListOfStrings)) - .setter(setter(Builder::mapOfStringToListOfListOfStrings)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MapOfStringToListOfListOfStrings") - .build(), - MapTrait.builder() - .keyLocationName("key") - .valueLocationName("value") - .valueFieldInfo( - SdkField.>> builder(MarshallingType.LIST) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) - .locationName("value").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField.> builder(MarshallingType.LIST) - .traits(LocationTrait.builder() - .location(MarshallLocation.PAYLOAD) - .locationName("member").build(), - ListTrait - .builder() - .memberLocationName(null) - .memberFieldInfo( - SdkField. 
builder( - MarshallingType.STRING) - .traits(LocationTrait - .builder() - .location( - MarshallLocation.PAYLOAD) - .locationName( - "member") - .build()) - .build()).build()) - .build()).build()).build()).build()).build(); + .>>> builder(MarshallingType.MAP) + .memberName("MapOfStringToListOfListOfStrings") + .getter(getter(NestedContainersResponse::mapOfStringToListOfListOfStrings)) + .setter(setter(Builder::mapOfStringToListOfListOfStrings)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MapOfStringToListOfListOfStrings") + .build(), + MapTrait.builder() + .keyLocationName("key") + .valueLocationName("value") + .valueFieldInfo( + SdkField.>> builder(MarshallingType.LIST) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) + .locationName("value").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField.> builder(MarshallingType.LIST) + .traits(LocationTrait.builder() + .location(MarshallLocation.PAYLOAD) + .locationName("member").build(), + ListTrait + .builder() + .memberLocationName(null) + .memberFieldInfo( + SdkField. builder( + MarshallingType.STRING) + .traits(LocationTrait + .builder() + .location( + MarshallLocation.PAYLOAD) + .locationName( + "member") + .build()) + .build()).build()) + .build()).build()).build()).build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(LIST_OF_LIST_OF_STRINGS_FIELD, - LIST_OF_LIST_OF_LIST_OF_STRINGS_FIELD, MAP_OF_STRING_TO_LIST_OF_LIST_OF_STRINGS_FIELD)); + LIST_OF_LIST_OF_LIST_OF_STRINGS_FIELD, MAP_OF_STRING_TO_LIST_OF_LIST_OF_STRINGS_FIELD)); private final List> listOfListOfStrings; @@ -161,7 +162,7 @@ public final boolean hasListOfListOfStrings() { *

* You can use {@link #hasListOfListOfStrings()} to see if a value was sent in this field. *

- * + * * @return The value of the ListOfListOfStrings property for this object. */ public final List> listOfListOfStrings() { @@ -185,7 +186,7 @@ public final boolean hasListOfListOfListOfStrings() { *

* You can use {@link #hasListOfListOfListOfStrings()} to see if a value was sent in this field. *

- * + * * @return The value of the ListOfListOfListOfStrings property for this object. */ public final List>> listOfListOfListOfStrings() { @@ -209,7 +210,7 @@ public final boolean hasMapOfStringToListOfListOfStrings() { *

* You can use {@link #hasMapOfStringToListOfListOfStrings()} to see if a value was sent in this field. *

- * + * * @return The value of the MapOfStringToListOfListOfStrings property for this object. */ public final Map>> mapOfStringToListOfListOfStrings() { @@ -236,7 +237,7 @@ public final int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(hasListOfListOfStrings() ? listOfListOfStrings() : null); hashCode = 31 * hashCode + Objects.hashCode(hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null); hashCode = 31 * hashCode - + Objects.hashCode(hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null); + + Objects.hashCode(hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null); return hashCode; } @@ -258,11 +259,11 @@ public final boolean equalsBySdkFields(Object obj) { } NestedContainersResponse other = (NestedContainersResponse) obj; return hasListOfListOfStrings() == other.hasListOfListOfStrings() - && Objects.equals(listOfListOfStrings(), other.listOfListOfStrings()) - && hasListOfListOfListOfStrings() == other.hasListOfListOfListOfStrings() - && Objects.equals(listOfListOfListOfStrings(), other.listOfListOfListOfStrings()) - && hasMapOfStringToListOfListOfStrings() == other.hasMapOfStringToListOfListOfStrings() - && Objects.equals(mapOfStringToListOfListOfStrings(), other.mapOfStringToListOfListOfStrings()); + && Objects.equals(listOfListOfStrings(), other.listOfListOfStrings()) + && hasListOfListOfListOfStrings() == other.hasListOfListOfListOfStrings() + && Objects.equals(listOfListOfListOfStrings(), other.listOfListOfListOfStrings()) + && hasMapOfStringToListOfListOfStrings() == other.hasMapOfStringToListOfListOfStrings() + && Objects.equals(mapOfStringToListOfListOfStrings(), other.mapOfStringToListOfListOfStrings()); } /** @@ -272,23 +273,23 @@ && hasMapOfStringToListOfListOfStrings() == other.hasMapOfStringToListOfListOfSt @Override public final String toString() { return ToString - .builder("NestedContainersResponse") - .add("ListOfListOfStrings", hasListOfListOfStrings() ? 
listOfListOfStrings() : null) - .add("ListOfListOfListOfStrings", hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null) - .add("MapOfStringToListOfListOfStrings", - hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null).build(); + .builder("NestedContainersResponse") + .add("ListOfListOfStrings", hasListOfListOfStrings() ? listOfListOfStrings() : null) + .add("ListOfListOfListOfStrings", hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null) + .add("MapOfStringToListOfListOfStrings", + hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "ListOfListOfStrings": - return Optional.ofNullable(clazz.cast(listOfListOfStrings())); - case "ListOfListOfListOfStrings": - return Optional.ofNullable(clazz.cast(listOfListOfListOfStrings())); - case "MapOfStringToListOfListOfStrings": - return Optional.ofNullable(clazz.cast(mapOfStringToListOfListOfStrings())); - default: - return Optional.empty(); + case "ListOfListOfStrings": + return Optional.ofNullable(clazz.cast(listOfListOfStrings())); + case "ListOfListOfListOfStrings": + return Optional.ofNullable(clazz.cast(listOfListOfListOfStrings())); + case "MapOfStringToListOfListOfStrings": + return Optional.ofNullable(clazz.cast(mapOfStringToListOfListOfStrings())); + default: + return Optional.empty(); } } @@ -306,7 +307,7 @@ private static BiConsumer setter(BiConsumer s) { } public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, - CopyableBuilder { + CopyableBuilder { /** * Sets the value of the ListOfListOfStrings property for this object. * @@ -351,7 +352,7 @@ public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, * @return Returns a reference to this object so that method calls can be chained together. 
*/ Builder mapOfStringToListOfListOfStrings( - Map>> mapOfStringToListOfListOfStrings); + Map>> mapOfStringToListOfListOfStrings); } static final class BuilderImpl extends JsonProtocolTestsResponse.BuilderImpl implements Builder { @@ -378,23 +379,25 @@ public final Collection> getListOfListOfStrings() { return listOfListOfStrings; } + public final void setListOfListOfStrings(Collection> listOfListOfStrings) { + this.listOfListOfStrings = ListOfListOfStringsCopier.copy(listOfListOfStrings); + } + @Override + @Transient public final Builder listOfListOfStrings(Collection> listOfListOfStrings) { this.listOfListOfStrings = ListOfListOfStringsCopier.copy(listOfListOfStrings); return this; } @Override + @Transient @SafeVarargs public final Builder listOfListOfStrings(Collection... listOfListOfStrings) { listOfListOfStrings(Arrays.asList(listOfListOfStrings)); return this; } - public final void setListOfListOfStrings(Collection> listOfListOfStrings) { - this.listOfListOfStrings = ListOfListOfStringsCopier.copy(listOfListOfStrings); - } - public final Collection>> getListOfListOfListOfStrings() { if (listOfListOfListOfStrings instanceof SdkAutoConstructList) { return null; @@ -402,25 +405,27 @@ public final Collection>> getL return listOfListOfListOfStrings; } + public final void setListOfListOfListOfStrings( + Collection>> listOfListOfListOfStrings) { + this.listOfListOfListOfStrings = ListOfListOfListOfStringsCopier.copy(listOfListOfListOfStrings); + } + @Override + @Transient public final Builder listOfListOfListOfStrings( - Collection>> listOfListOfListOfStrings) { + Collection>> listOfListOfListOfStrings) { this.listOfListOfListOfStrings = ListOfListOfListOfStringsCopier.copy(listOfListOfListOfStrings); return this; } @Override + @Transient @SafeVarargs public final Builder listOfListOfListOfStrings(Collection>... 
listOfListOfListOfStrings) { listOfListOfListOfStrings(Arrays.asList(listOfListOfListOfStrings)); return this; } - public final void setListOfListOfListOfStrings( - Collection>> listOfListOfListOfStrings) { - this.listOfListOfListOfStrings = ListOfListOfListOfStringsCopier.copy(listOfListOfListOfStrings); - } - public final Map>> getMapOfStringToListOfListOfStrings() { if (mapOfStringToListOfListOfStrings instanceof SdkAutoConstructMap) { return null; @@ -428,16 +433,17 @@ public final void setListOfListOfListOfStrings( return mapOfStringToListOfListOfStrings; } - @Override - public final Builder mapOfStringToListOfListOfStrings( - Map>> mapOfStringToListOfListOfStrings) { + public final void setMapOfStringToListOfListOfStrings( + Map>> mapOfStringToListOfListOfStrings) { this.mapOfStringToListOfListOfStrings = MapOfStringToListOfListOfStringsCopier.copy(mapOfStringToListOfListOfStrings); - return this; } - public final void setMapOfStringToListOfListOfStrings( - Map>> mapOfStringToListOfListOfStrings) { + @Override + @Transient + public final Builder mapOfStringToListOfListOfStrings( + Map>> mapOfStringToListOfListOfStrings) { this.mapOfStringToListOfListOfStrings = MapOfStringToListOfListOfStringsCopier.copy(mapOfStringToListOfListOfStrings); + return this; } @Override @@ -451,4 +457,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberrequest.java new file mode 100644 index 000000000000..b23bb0971067 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberrequest.java @@ -0,0 +1,309 @@ +package software.amazon.awssdk.services.jsonprotocoltests.model; + +import java.beans.Transient; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; 
+import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.protocol.MarshallLocation; +import software.amazon.awssdk.core.protocol.MarshallingType; +import software.amazon.awssdk.core.traits.LocationTrait; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + */ +@Generated("software.amazon.awssdk:codegen") +public final class OperationWithDeprecatedMemberRequest extends JsonProtocolTestsRequest implements + ToCopyableBuilder { + private static final SdkField MEMBER_MODELED_AS_DEPRECATED_FIELD = SdkField. builder(MarshallingType.STRING) + .memberName("MemberModeledAsDeprecated") + .getter(getter(OperationWithDeprecatedMemberRequest::memberModeledAsDeprecated)) + .setter(setter(Builder::memberModeledAsDeprecated)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MemberModeledAsDeprecated").build()) + .build(); + + private static final SdkField MEMBER_MODIFIED_AS_DEPRECATED_FIELD = SdkField + . builder(MarshallingType.STRING) + .memberName("MemberModifiedAsDeprecated") + .getter(getter(OperationWithDeprecatedMemberRequest::memberModifiedAsDeprecated)) + .setter(setter(Builder::memberModifiedAsDeprecated)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MemberModifiedAsDeprecated").build()) + .build(); + + private static final SdkField UNDEPRECATED_MEMBER_FIELD = SdkField. 
builder(MarshallingType.STRING) + .memberName("UndeprecatedMember").getter(getter(OperationWithDeprecatedMemberRequest::undeprecatedMember)) + .setter(setter(Builder::undeprecatedMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("UndeprecatedMember").build()) + .build(); + + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList( + MEMBER_MODELED_AS_DEPRECATED_FIELD, MEMBER_MODIFIED_AS_DEPRECATED_FIELD, UNDEPRECATED_MEMBER_FIELD)); + + private final String memberModeledAsDeprecated; + + private final String memberModifiedAsDeprecated; + + private final String undeprecatedMember; + + private OperationWithDeprecatedMemberRequest(BuilderImpl builder) { + super(builder); + this.memberModeledAsDeprecated = builder.memberModeledAsDeprecated; + this.memberModifiedAsDeprecated = builder.memberModifiedAsDeprecated; + this.undeprecatedMember = builder.undeprecatedMember; + } + + /** + * Returns the value of the MemberModeledAsDeprecated property for this object. + * + * @return The value of the MemberModeledAsDeprecated property for this object. + * @deprecated This field is modeled as deprecated. + */ + @Deprecated + public final String memberModeledAsDeprecated() { + return memberModeledAsDeprecated; + } + + /** + * Returns the value of the MemberModifiedAsDeprecated property for this object. + * + * @return The value of the MemberModifiedAsDeprecated property for this object. + * @deprecated This field is modified as deprecated. + */ + @Deprecated + public final String memberModifiedAsDeprecated() { + return memberModifiedAsDeprecated; + } + + /** + * Returns the value of the UndeprecatedMember property for this object. + * + * @return The value of the UndeprecatedMember property for this object. 
+ */ + public final String undeprecatedMember() { + return undeprecatedMember; + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public final int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + super.hashCode(); + hashCode = 31 * hashCode + Objects.hashCode(memberModeledAsDeprecated()); + hashCode = 31 * hashCode + Objects.hashCode(memberModifiedAsDeprecated()); + hashCode = 31 * hashCode + Objects.hashCode(undeprecatedMember()); + return hashCode; + } + + @Override + public final boolean equals(Object obj) { + return super.equals(obj) && equalsBySdkFields(obj); + } + + @Override + public final boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof OperationWithDeprecatedMemberRequest)) { + return false; + } + OperationWithDeprecatedMemberRequest other = (OperationWithDeprecatedMemberRequest) obj; + return Objects.equals(memberModeledAsDeprecated(), other.memberModeledAsDeprecated()) + && Objects.equals(memberModifiedAsDeprecated(), other.memberModifiedAsDeprecated()) + && Objects.equals(undeprecatedMember(), other.undeprecatedMember()); + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be + * redacted from this string using a placeholder value. 
+ */ + @Override + public final String toString() { + return ToString.builder("OperationWithDeprecatedMemberRequest") + .add("MemberModeledAsDeprecated", memberModeledAsDeprecated()) + .add("MemberModifiedAsDeprecated", memberModifiedAsDeprecated()).add("UndeprecatedMember", undeprecatedMember()) + .build(); + } + + public final Optional getValueForField(String fieldName, Class clazz) { + switch (fieldName) { + case "MemberModeledAsDeprecated": + return Optional.ofNullable(clazz.cast(memberModeledAsDeprecated())); + case "MemberModifiedAsDeprecated": + return Optional.ofNullable(clazz.cast(memberModifiedAsDeprecated())); + case "UndeprecatedMember": + return Optional.ofNullable(clazz.cast(undeprecatedMember())); + default: + return Optional.empty(); + } + } + + @Override + public final List> sdkFields() { + return SDK_FIELDS; + } + + private static Function getter(Function g) { + return obj -> g.apply((OperationWithDeprecatedMemberRequest) obj); + } + + private static BiConsumer setter(BiConsumer s) { + return (obj, val) -> s.accept((Builder) obj, val); + } + + public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, + CopyableBuilder { + /** + * Sets the value of the MemberModeledAsDeprecated property for this object. + * + * @param memberModeledAsDeprecated + * The new value for the MemberModeledAsDeprecated property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + * @deprecated This field is modeled as deprecated. + */ + @Deprecated + Builder memberModeledAsDeprecated(String memberModeledAsDeprecated); + + /** + * Sets the value of the MemberModifiedAsDeprecated property for this object. + * + * @param memberModifiedAsDeprecated + * The new value for the MemberModifiedAsDeprecated property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + * @deprecated This field is modified as deprecated. 
+ */ + @Deprecated + Builder memberModifiedAsDeprecated(String memberModifiedAsDeprecated); + + /** + * Sets the value of the UndeprecatedMember property for this object. + * + * @param undeprecatedMember + * The new value for the UndeprecatedMember property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + */ + Builder undeprecatedMember(String undeprecatedMember); + + @Override + Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); + + @Override + Builder overrideConfiguration(Consumer builderConsumer); + } + + static final class BuilderImpl extends JsonProtocolTestsRequest.BuilderImpl implements Builder { + private String memberModeledAsDeprecated; + + private String memberModifiedAsDeprecated; + + private String undeprecatedMember; + + private BuilderImpl() { + } + + private BuilderImpl(OperationWithDeprecatedMemberRequest model) { + super(model); + memberModeledAsDeprecated(model.memberModeledAsDeprecated); + memberModifiedAsDeprecated(model.memberModifiedAsDeprecated); + undeprecatedMember(model.undeprecatedMember); + } + + @Deprecated + public final String getMemberModeledAsDeprecated() { + return memberModeledAsDeprecated; + } + + @Deprecated + public final void setMemberModeledAsDeprecated(String memberModeledAsDeprecated) { + this.memberModeledAsDeprecated = memberModeledAsDeprecated; + } + + @Override + @Transient + @Deprecated + public final Builder memberModeledAsDeprecated(String memberModeledAsDeprecated) { + this.memberModeledAsDeprecated = memberModeledAsDeprecated; + return this; + } + + @Deprecated + public final String getMemberModifiedAsDeprecated() { + return memberModifiedAsDeprecated; + } + + @Deprecated + public final void setMemberModifiedAsDeprecated(String memberModifiedAsDeprecated) { + this.memberModifiedAsDeprecated = memberModifiedAsDeprecated; + } + + @Override + @Transient + @Deprecated + public final Builder 
memberModifiedAsDeprecated(String memberModifiedAsDeprecated) { + this.memberModifiedAsDeprecated = memberModifiedAsDeprecated; + return this; + } + + public final String getUndeprecatedMember() { + return undeprecatedMember; + } + + public final void setUndeprecatedMember(String undeprecatedMember) { + this.undeprecatedMember = undeprecatedMember; + } + + @Override + @Transient + public final Builder undeprecatedMember(String undeprecatedMember) { + this.undeprecatedMember = undeprecatedMember; + return this; + } + + @Override + public Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration) { + super.overrideConfiguration(overrideConfiguration); + return this; + } + + @Override + public Builder overrideConfiguration(Consumer builderConsumer) { + super.overrideConfiguration(builderConsumer); + return this; + } + + @Override + public OperationWithDeprecatedMemberRequest build() { + return new OperationWithDeprecatedMemberRequest(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberresponse.java new file mode 100644 index 000000000000..d9e598b9b93a --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberresponse.java @@ -0,0 +1,282 @@ +package software.amazon.awssdk.services.jsonprotocoltests.model; + +import java.beans.Transient; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import 
software.amazon.awssdk.core.protocol.MarshallLocation; +import software.amazon.awssdk.core.protocol.MarshallingType; +import software.amazon.awssdk.core.traits.LocationTrait; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + */ +@Generated("software.amazon.awssdk:codegen") +public final class OperationWithDeprecatedMemberResponse extends JsonProtocolTestsResponse implements + ToCopyableBuilder { + private static final SdkField MEMBER_MODELED_AS_DEPRECATED_FIELD = SdkField. builder(MarshallingType.STRING) + .memberName("MemberModeledAsDeprecated") + .getter(getter(OperationWithDeprecatedMemberResponse::memberModeledAsDeprecated)) + .setter(setter(Builder::memberModeledAsDeprecated)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MemberModeledAsDeprecated").build()) + .build(); + + private static final SdkField MEMBER_MODIFIED_AS_DEPRECATED_FIELD = SdkField + . builder(MarshallingType.STRING) + .memberName("MemberModifiedAsDeprecated") + .getter(getter(OperationWithDeprecatedMemberResponse::memberModifiedAsDeprecated)) + .setter(setter(Builder::memberModifiedAsDeprecated)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MemberModifiedAsDeprecated").build()) + .build(); + + private static final SdkField UNDEPRECATED_MEMBER_FIELD = SdkField. 
builder(MarshallingType.STRING) + .memberName("UndeprecatedMember").getter(getter(OperationWithDeprecatedMemberResponse::undeprecatedMember)) + .setter(setter(Builder::undeprecatedMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("UndeprecatedMember").build()) + .build(); + + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList( + MEMBER_MODELED_AS_DEPRECATED_FIELD, MEMBER_MODIFIED_AS_DEPRECATED_FIELD, UNDEPRECATED_MEMBER_FIELD)); + + private final String memberModeledAsDeprecated; + + private final String memberModifiedAsDeprecated; + + private final String undeprecatedMember; + + private OperationWithDeprecatedMemberResponse(BuilderImpl builder) { + super(builder); + this.memberModeledAsDeprecated = builder.memberModeledAsDeprecated; + this.memberModifiedAsDeprecated = builder.memberModifiedAsDeprecated; + this.undeprecatedMember = builder.undeprecatedMember; + } + + /** + * Returns the value of the MemberModeledAsDeprecated property for this object. + * + * @return The value of the MemberModeledAsDeprecated property for this object. + * @deprecated This field is modeled as deprecated. + */ + @Deprecated + public final String memberModeledAsDeprecated() { + return memberModeledAsDeprecated; + } + + /** + * Returns the value of the MemberModifiedAsDeprecated property for this object. + * + * @return The value of the MemberModifiedAsDeprecated property for this object. + */ + public final String memberModifiedAsDeprecated() { + return memberModifiedAsDeprecated; + } + + /** + * Returns the value of the UndeprecatedMember property for this object. + * + * @return The value of the UndeprecatedMember property for this object. 
+ */ + public final String undeprecatedMember() { + return undeprecatedMember; + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public final int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + super.hashCode(); + hashCode = 31 * hashCode + Objects.hashCode(memberModeledAsDeprecated()); + hashCode = 31 * hashCode + Objects.hashCode(memberModifiedAsDeprecated()); + hashCode = 31 * hashCode + Objects.hashCode(undeprecatedMember()); + return hashCode; + } + + @Override + public final boolean equals(Object obj) { + return super.equals(obj) && equalsBySdkFields(obj); + } + + @Override + public final boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof OperationWithDeprecatedMemberResponse)) { + return false; + } + OperationWithDeprecatedMemberResponse other = (OperationWithDeprecatedMemberResponse) obj; + return Objects.equals(memberModeledAsDeprecated(), other.memberModeledAsDeprecated()) + && Objects.equals(memberModifiedAsDeprecated(), other.memberModifiedAsDeprecated()) + && Objects.equals(undeprecatedMember(), other.undeprecatedMember()); + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be + * redacted from this string using a placeholder value. 
+ */ + @Override + public final String toString() { + return ToString.builder("OperationWithDeprecatedMemberResponse") + .add("MemberModeledAsDeprecated", memberModeledAsDeprecated()) + .add("MemberModifiedAsDeprecated", memberModifiedAsDeprecated()).add("UndeprecatedMember", undeprecatedMember()) + .build(); + } + + public final Optional getValueForField(String fieldName, Class clazz) { + switch (fieldName) { + case "MemberModeledAsDeprecated": + return Optional.ofNullable(clazz.cast(memberModeledAsDeprecated())); + case "MemberModifiedAsDeprecated": + return Optional.ofNullable(clazz.cast(memberModifiedAsDeprecated())); + case "UndeprecatedMember": + return Optional.ofNullable(clazz.cast(undeprecatedMember())); + default: + return Optional.empty(); + } + } + + @Override + public final List> sdkFields() { + return SDK_FIELDS; + } + + private static Function getter(Function g) { + return obj -> g.apply((OperationWithDeprecatedMemberResponse) obj); + } + + private static BiConsumer setter(BiConsumer s) { + return (obj, val) -> s.accept((Builder) obj, val); + } + + public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, + CopyableBuilder { + /** + * Sets the value of the MemberModeledAsDeprecated property for this object. + * + * @param memberModeledAsDeprecated + * The new value for the MemberModeledAsDeprecated property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + * @deprecated This field is modeled as deprecated. + */ + @Deprecated + Builder memberModeledAsDeprecated(String memberModeledAsDeprecated); + + /** + * Sets the value of the MemberModifiedAsDeprecated property for this object. + * + * @param memberModifiedAsDeprecated + * The new value for the MemberModifiedAsDeprecated property for this object. + * @return Returns a reference to this object so that method calls can be chained together. 
+ */ + Builder memberModifiedAsDeprecated(String memberModifiedAsDeprecated); + + /** + * Sets the value of the UndeprecatedMember property for this object. + * + * @param undeprecatedMember + * The new value for the UndeprecatedMember property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + */ + Builder undeprecatedMember(String undeprecatedMember); + } + + static final class BuilderImpl extends JsonProtocolTestsResponse.BuilderImpl implements Builder { + private String memberModeledAsDeprecated; + + private String memberModifiedAsDeprecated; + + private String undeprecatedMember; + + private BuilderImpl() { + } + + private BuilderImpl(OperationWithDeprecatedMemberResponse model) { + super(model); + memberModeledAsDeprecated(model.memberModeledAsDeprecated); + memberModifiedAsDeprecated(model.memberModifiedAsDeprecated); + undeprecatedMember(model.undeprecatedMember); + } + + @Deprecated + public final String getMemberModeledAsDeprecated() { + return memberModeledAsDeprecated; + } + + @Deprecated + public final void setMemberModeledAsDeprecated(String memberModeledAsDeprecated) { + this.memberModeledAsDeprecated = memberModeledAsDeprecated; + } + + @Override + @Transient + @Deprecated + public final Builder memberModeledAsDeprecated(String memberModeledAsDeprecated) { + this.memberModeledAsDeprecated = memberModeledAsDeprecated; + return this; + } + + public final String getMemberModifiedAsDeprecated() { + return memberModifiedAsDeprecated; + } + + public final void setMemberModifiedAsDeprecated(String memberModifiedAsDeprecated) { + this.memberModifiedAsDeprecated = memberModifiedAsDeprecated; + } + + @Override + @Transient + public final Builder memberModifiedAsDeprecated(String memberModifiedAsDeprecated) { + this.memberModifiedAsDeprecated = memberModifiedAsDeprecated; + return this; + } + + public final String getUndeprecatedMember() { + return undeprecatedMember; + } + + public final void 
setUndeprecatedMember(String undeprecatedMember) { + this.undeprecatedMember = undeprecatedMember; + } + + @Override + @Transient + public final Builder undeprecatedMember(String undeprecatedMember) { + this.undeprecatedMember = undeprecatedMember; + return this; + } + + @Override + public OperationWithDeprecatedMemberResponse build() { + return new OperationWithDeprecatedMemberResponse(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java index 8cffc78d21c3..1ab7ca362428 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.util.Arrays; import java.util.Collection; @@ -346,30 +347,32 @@ public final String getNoRecurse() { return noRecurse; } - @Override - public final Builder noRecurse(String noRecurse) { + public final void setNoRecurse(String noRecurse) { this.noRecurse = noRecurse; - return this; } - public final void setNoRecurse(String noRecurse) { + @Override + @Transient + public final Builder noRecurse(String noRecurse) { this.noRecurse = noRecurse; + return this; } public final Builder getRecursiveStruct() { return recursiveStruct != null ? recursiveStruct.toBuilder() : null; } + public final void setRecursiveStruct(BuilderImpl recursiveStruct) { + this.recursiveStruct = recursiveStruct != null ? 
recursiveStruct.build() : null; + } + @Override + @Transient public final Builder recursiveStruct(RecursiveStructType recursiveStruct) { this.recursiveStruct = recursiveStruct; return this; } - public final void setRecursiveStruct(BuilderImpl recursiveStruct) { - this.recursiveStruct = recursiveStruct != null ? recursiveStruct.build() : null; - } - public final List getRecursiveList() { List result = RecursiveListTypeCopier.copyToBuilder(this.recursiveList); if (result instanceof SdkAutoConstructList) { @@ -378,13 +381,19 @@ public final List getRecursiveList() { return result; } + public final void setRecursiveList(Collection recursiveList) { + this.recursiveList = RecursiveListTypeCopier.copyFromBuilder(recursiveList); + } + @Override + @Transient public final Builder recursiveList(Collection recursiveList) { this.recursiveList = RecursiveListTypeCopier.copy(recursiveList); return this; } @Override + @Transient @SafeVarargs public final Builder recursiveList(RecursiveStructType... recursiveList) { recursiveList(Arrays.asList(recursiveList)); @@ -392,6 +401,7 @@ public final Builder recursiveList(RecursiveStructType... recursiveList) { } @Override + @Transient @SafeVarargs public final Builder recursiveList(Consumer... recursiveList) { recursiveList(Stream.of(recursiveList).map(c -> RecursiveStructType.builder().applyMutation(c).build()) @@ -399,10 +409,6 @@ public final Builder recursiveList(Consumer... 
recursiveList) { return this; } - public final void setRecursiveList(Collection recursiveList) { - this.recursiveList = RecursiveListTypeCopier.copyFromBuilder(recursiveList); - } - public final Map getRecursiveMap() { Map result = RecursiveMapTypeCopier.copyToBuilder(this.recursiveMap); if (result instanceof SdkAutoConstructMap) { @@ -411,16 +417,17 @@ public final Map getRecursiveMap() { return result; } + public final void setRecursiveMap(Map recursiveMap) { + this.recursiveMap = RecursiveMapTypeCopier.copyFromBuilder(recursiveMap); + } + @Override + @Transient public final Builder recursiveMap(Map recursiveMap) { this.recursiveMap = RecursiveMapTypeCopier.copy(recursiveMap); return this; } - public final void setRecursiveMap(Map recursiveMap) { - this.recursiveMap = RecursiveMapTypeCopier.copyFromBuilder(recursiveMap); - } - @Override public RecursiveStructType build() { return new RecursiveStructType(this); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/service-2.json index e2d000c40423..d25f85123ea5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/service-2.json @@ -51,6 +51,15 @@ "input": {"shape": "DeprecatedRenameRequest"}, "output": {"shape": "DeprecatedRenameResponse"} }, + "OperationWithDeprecatedMember": { + "name": "OperationWithDeprecatedMember", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": {"shape": "OperationWithDeprecatedMemberRequest"}, + "output": {"shape": "OperationWithDeprecatedMemberResponse"} + }, "StreamingInputOperation":{ "name":"StreamingInputOperation", "http":{ @@ -179,6 +188,30 @@ "OriginalNameDeprecated": {"shape": "String"} } }, + "OperationWithDeprecatedMemberRequest":{ + "type": "structure", + "members": { + "MemberModeledAsDeprecated": { + "shape": "String", + 
"deprecated": true, + "deprecatedMessage": "This field is modeled as deprecated." + }, + "MemberModifiedAsDeprecated":{"shape": "String"}, + "UndeprecatedMember": {"shape": "String"} + } + }, + "OperationWithDeprecatedMemberResponse":{ + "type": "structure", + "members": { + "MemberModeledAsDeprecated": { + "shape": "String", + "deprecated": true, + "deprecatedMessage": "This field is modeled as deprecated." + }, + "MemberModifiedAsDeprecated":{"shape": "String"}, + "UndeprecatedMember": {"shape": "String"} + } + }, "Double":{"type":"double"}, "EmptyModeledException":{ "type":"structure", diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java index a37e7b6b73bb..4fe4d7f604c8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.sharedeventstream.model; +import java.beans.Transient; import java.time.Instant; import java.util.Arrays; import java.util.Collections; @@ -23,14 +24,14 @@ */ @Generated("software.amazon.awssdk:codegen") public class GetRandomPersonResponse extends SharedEventStreamResponse implements - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField NAME_FIELD = SdkField. 
builder(MarshallingType.STRING).memberName("Name") - .getter(getter(GetRandomPersonResponse::name)).setter(setter(Builder::name)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Name").build()).build(); + .getter(getter(GetRandomPersonResponse::name)).setter(setter(Builder::name)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Name").build()).build(); private static final SdkField BIRTHDAY_FIELD = SdkField. builder(MarshallingType.INSTANT) - .memberName("Birthday").getter(getter(GetRandomPersonResponse::birthday)).setter(setter(Builder::birthday)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Birthday").build()).build(); + .memberName("Birthday").getter(getter(GetRandomPersonResponse::birthday)).setter(setter(Builder::birthday)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Birthday").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(NAME_FIELD, BIRTHDAY_FIELD)); @@ -46,7 +47,7 @@ protected GetRandomPersonResponse(BuilderImpl builder) { /** * Returns the value of the Name property for this object. - * + * * @return The value of the Name property for this object. */ public final String name() { @@ -55,7 +56,7 @@ public final String name() { /** * Returns the value of the Birthday property for this object. - * + * * @return The value of the Birthday property for this object. 
*/ public final Instant birthday() { @@ -115,12 +116,12 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "Name": - return Optional.ofNullable(clazz.cast(name())); - case "Birthday": - return Optional.ofNullable(clazz.cast(birthday())); - default: - return Optional.empty(); + case "Name": + return Optional.ofNullable(clazz.cast(name())); + case "Birthday": + return Optional.ofNullable(clazz.cast(birthday())); + default: + return Optional.empty(); } } @@ -143,7 +144,7 @@ private static BiConsumer setter(BiConsumer s) { } public interface Builder extends SharedEventStreamResponse.Builder, SdkPojo, - CopyableBuilder { + CopyableBuilder { /** * Sets the value of the Name property for this object. * @@ -181,28 +182,30 @@ public final String getName() { return name; } - @Override - public final Builder name(String name) { + public final void setName(String name) { this.name = name; - return this; } - public final void setName(String name) { + @Override + @Transient + public final Builder name(String name) { this.name = name; + return this; } public final Instant getBirthday() { return birthday; } - @Override - public final Builder birthday(Instant birthday) { + public final void setBirthday(Instant birthday) { this.birthday = birthday; - return this; } - public final void setBirthday(Instant birthday) { + @Override + @Transient + public final Builder birthday(Instant birthday) { this.birthday = birthday; + return this; } @Override @@ -216,4 +219,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java index ef1a7c98673e..9e774466bb74 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.sharedeventstream.model; +import java.beans.Transient; import java.io.Serializable; import java.time.Instant; import java.util.Arrays; @@ -25,12 +26,12 @@ @Generated("software.amazon.awssdk:codegen") public class Person implements SdkPojo, Serializable, ToCopyableBuilder, EventStream { private static final SdkField NAME_FIELD = SdkField. builder(MarshallingType.STRING).memberName("Name") - .getter(getter(Person::name)).setter(setter(Builder::name)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Name").build()).build(); + .getter(getter(Person::name)).setter(setter(Builder::name)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Name").build()).build(); private static final SdkField BIRTHDAY_FIELD = SdkField. builder(MarshallingType.INSTANT) - .memberName("Birthday").getter(getter(Person::birthday)).setter(setter(Builder::birthday)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Birthday").build()).build(); + .memberName("Birthday").getter(getter(Person::birthday)).setter(setter(Builder::birthday)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Birthday").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(NAME_FIELD, BIRTHDAY_FIELD)); @@ -47,7 +48,7 @@ protected Person(BuilderImpl builder) { /** * Returns the value of the Name property for this object. - * + * * @return The value of the Name property for this object. */ public final String name() { @@ -56,7 +57,7 @@ public final String name() { /** * Returns the value of the Birthday property for this object. - * + * * @return The value of the Birthday property for this object. 
*/ public final Instant birthday() { @@ -115,12 +116,12 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "Name": - return Optional.ofNullable(clazz.cast(name())); - case "Birthday": - return Optional.ofNullable(clazz.cast(birthday())); - default: - return Optional.empty(); + case "Name": + return Optional.ofNullable(clazz.cast(name())); + case "Birthday": + return Optional.ofNullable(clazz.cast(birthday())); + default: + return Optional.empty(); } } @@ -201,28 +202,30 @@ public final String getName() { return name; } - @Override - public final Builder name(String name) { + public final void setName(String name) { this.name = name; - return this; } - public final void setName(String name) { + @Override + @Transient + public final Builder name(String name) { this.name = name; + return this; } public final Instant getBirthday() { return birthday; } - @Override - public final Builder birthday(Instant birthday) { + public final void setBirthday(Instant birthday) { this.birthday = birthday; - return this; } - public final void setBirthday(Instant birthday) { + @Override + @Transient + public final Builder birthday(Instant birthday) { this.birthday = birthday; + return this; } @Override @@ -236,4 +239,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/simplestruct.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/simplestruct.java index 131406dee962..f611640b997a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/simplestruct.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/simplestruct.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.util.Arrays; import java.util.Collections; @@ -23,8 +24,8 @@ 
@Generated("software.amazon.awssdk:codegen") public final class SimpleStruct implements SdkPojo, Serializable, ToCopyableBuilder { private static final SdkField STRING_MEMBER_FIELD = SdkField. builder(MarshallingType.STRING) - .memberName("StringMember").getter(getter(SimpleStruct::stringMember)).setter(setter(Builder::stringMember)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("StringMember").build()).build(); + .memberName("StringMember").getter(getter(SimpleStruct::stringMember)).setter(setter(Builder::stringMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("StringMember").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(STRING_MEMBER_FIELD)); @@ -38,7 +39,7 @@ private SimpleStruct(BuilderImpl builder) { /** * Returns the value of the StringMember property for this object. - * + * * @return The value of the StringMember property for this object. */ public final String stringMember() { @@ -96,10 +97,10 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "StringMember": - return Optional.ofNullable(clazz.cast(stringMember())); - default: - return Optional.empty(); + case "StringMember": + return Optional.ofNullable(clazz.cast(stringMember())); + default: + return Optional.empty(); } } @@ -141,14 +142,15 @@ public final String getStringMember() { return stringMember; } - @Override - public final Builder stringMember(String stringMember) { + public final void setStringMember(String stringMember) { this.stringMember = stringMember; - return this; } - public final void setStringMember(String stringMember) { + @Override + @Transient + public final Builder stringMember(String stringMember) { this.stringMember = stringMember; + return this; } @Override @@ -162,4 +164,3 @@ public List> sdkFields() { } } } - diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithnestedblobtype.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithnestedblobtype.java index 3f781ce6274c..0d0200566f94 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithnestedblobtype.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithnestedblobtype.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Arrays; @@ -144,16 +145,17 @@ public final ByteBuffer getNestedBlob() { return nestedBlob == null ? null : nestedBlob.asByteBuffer(); } + public final void setNestedBlob(ByteBuffer nestedBlob) { + nestedBlob(nestedBlob == null ? null : SdkBytes.fromByteBuffer(nestedBlob)); + } + @Override + @Transient public final Builder nestedBlob(SdkBytes nestedBlob) { this.nestedBlob = nestedBlob; return this; } - public final void setNestedBlob(ByteBuffer nestedBlob) { - nestedBlob(nestedBlob == null ? 
null : SdkBytes.fromByteBuffer(nestedBlob)); - } - @Override public StructWithNestedBlobType build() { return new StructWithNestedBlobType(this); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithtimestamp.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithtimestamp.java index fbaff383cca4..8f4ee6dad727 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithtimestamp.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithtimestamp.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.time.Instant; import java.util.Arrays; @@ -23,11 +24,11 @@ */ @Generated("software.amazon.awssdk:codegen") public final class StructWithTimestamp implements SdkPojo, Serializable, - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField NESTED_TIMESTAMP_FIELD = SdkField. builder(MarshallingType.INSTANT) - .memberName("NestedTimestamp").getter(getter(StructWithTimestamp::nestedTimestamp)) - .setter(setter(Builder::nestedTimestamp)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("NestedTimestamp").build()).build(); + .memberName("NestedTimestamp").getter(getter(StructWithTimestamp::nestedTimestamp)) + .setter(setter(Builder::nestedTimestamp)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("NestedTimestamp").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(NESTED_TIMESTAMP_FIELD)); @@ -41,7 +42,7 @@ private StructWithTimestamp(BuilderImpl builder) { /** * Returns the value of the NestedTimestamp property for this object. - * + * * @return The value of the NestedTimestamp property for this object. 
*/ public final Instant nestedTimestamp() { @@ -99,10 +100,10 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "NestedTimestamp": - return Optional.ofNullable(clazz.cast(nestedTimestamp())); - default: - return Optional.empty(); + case "NestedTimestamp": + return Optional.ofNullable(clazz.cast(nestedTimestamp())); + default: + return Optional.empty(); } } @@ -144,14 +145,15 @@ public final Instant getNestedTimestamp() { return nestedTimestamp; } - @Override - public final Builder nestedTimestamp(Instant nestedTimestamp) { + public final void setNestedTimestamp(Instant nestedTimestamp) { this.nestedTimestamp = nestedTimestamp; - return this; } - public final void setNestedTimestamp(Instant nestedTimestamp) { + @Override + @Transient + public final Builder nestedTimestamp(Instant nestedTimestamp) { this.nestedTimestamp = nestedTimestamp; + return this; } @Override @@ -165,4 +167,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/subtypeone.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/subtypeone.java index 4dcbc2f7a0db..8b6e70958f77 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/subtypeone.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/subtypeone.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.jsonprotocoltests.model; +import java.beans.Transient; import java.io.Serializable; import java.util.Arrays; import java.util.Collections; @@ -23,9 +24,9 @@ @Generated("software.amazon.awssdk:codegen") public final class SubTypeOne implements SdkPojo, Serializable, ToCopyableBuilder { private static final SdkField SUB_TYPE_ONE_MEMBER_FIELD = SdkField. 
builder(MarshallingType.STRING) - .memberName("SubTypeOneMember").getter(getter(SubTypeOne::subTypeOneMember)) - .setter(setter(Builder::subTypeOneMember)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("SubTypeOneMember").build()).build(); + .memberName("SubTypeOneMember").getter(getter(SubTypeOne::subTypeOneMember)) + .setter(setter(Builder::subTypeOneMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("SubTypeOneMember").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(SUB_TYPE_ONE_MEMBER_FIELD)); @@ -39,7 +40,7 @@ private SubTypeOne(BuilderImpl builder) { /** * Returns the value of the SubTypeOneMember property for this object. - * + * * @return The value of the SubTypeOneMember property for this object. */ public final String subTypeOneMember() { @@ -97,10 +98,10 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "SubTypeOneMember": - return Optional.ofNullable(clazz.cast(subTypeOneMember())); - default: - return Optional.empty(); + case "SubTypeOneMember": + return Optional.ofNullable(clazz.cast(subTypeOneMember())); + default: + return Optional.empty(); } } @@ -142,14 +143,15 @@ public final String getSubTypeOneMember() { return subTypeOneMember; } - @Override - public final Builder subTypeOneMember(String subTypeOneMember) { + public final void setSubTypeOneMember(String subTypeOneMember) { this.subTypeOneMember = subTypeOneMember; - return this; } - public final void setSubTypeOneMember(String subTypeOneMember) { + @Override + @Transient + public final Builder subTypeOneMember(String subTypeOneMember) { this.subTypeOneMember = subTypeOneMember; + return this; } @Override @@ -163,4 +165,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespacerequest.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespacerequest.java index 3954c5488d9c..dd6cdd07bf1d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespacerequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespacerequest.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.protocolrestxml.model; +import java.beans.Transient; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -25,41 +26,41 @@ */ @Generated("software.amazon.awssdk:codegen") public final class TestXmlNamespaceRequest extends ProtocolRestXmlRequest implements - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField STRING_MEMBER_FIELD = SdkField - . builder(MarshallingType.STRING) - .memberName("stringMember") - .getter(getter(TestXmlNamespaceRequest::stringMember)) - .setter(setter(Builder::stringMember)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("stringMember") - .unmarshallLocationName("stringMember").build()).build(); + . builder(MarshallingType.STRING) + .memberName("stringMember") + .getter(getter(TestXmlNamespaceRequest::stringMember)) + .setter(setter(Builder::stringMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("stringMember") + .unmarshallLocationName("stringMember").build()).build(); private static final SdkField INTEGER_MEMBER_FIELD = SdkField - . builder(MarshallingType.INTEGER) - .memberName("integerMember") - .getter(getter(TestXmlNamespaceRequest::integerMember)) - .setter(setter(Builder::integerMember)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("integerMember") - .unmarshallLocationName("integerMember").build()).build(); + . 
builder(MarshallingType.INTEGER) + .memberName("integerMember") + .getter(getter(TestXmlNamespaceRequest::integerMember)) + .setter(setter(Builder::integerMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("integerMember") + .unmarshallLocationName("integerMember").build()).build(); private static final SdkField XML_NAMESPACE_MEMBER_FIELD = SdkField - . builder(MarshallingType.SDK_POJO) - .memberName("xmlNamespaceMember") - .getter(getter(TestXmlNamespaceRequest::xmlNamespaceMember)) - .setter(setter(Builder::xmlNamespaceMember)) - .constructor(XmlNamespaceMember::builder) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("xmlNamespaceMember") - .unmarshallLocationName("xmlNamespaceMember").build(), - XmlAttributesTrait.create( - Pair.of("xmlns:foo", - XmlAttributesTrait.AttributeAccessors.builder().attributeGetter((ignore) -> "http://bar") - .build()), - Pair.of("foo:type", - XmlAttributesTrait.AttributeAccessors.builder() - .attributeGetter(t -> ((XmlNamespaceMember) t).type()).build()))).build(); + . 
builder(MarshallingType.SDK_POJO) + .memberName("xmlNamespaceMember") + .getter(getter(TestXmlNamespaceRequest::xmlNamespaceMember)) + .setter(setter(Builder::xmlNamespaceMember)) + .constructor(XmlNamespaceMember::builder) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("xmlNamespaceMember") + .unmarshallLocationName("xmlNamespaceMember").build(), + XmlAttributesTrait.create( + Pair.of("xmlns:foo", + XmlAttributesTrait.AttributeAccessors.builder().attributeGetter((ignore) -> "http://bar") + .build()), + Pair.of("foo:type", + XmlAttributesTrait.AttributeAccessors.builder() + .attributeGetter(t -> ((XmlNamespaceMember) t).type()).build()))).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(STRING_MEMBER_FIELD, - INTEGER_MEMBER_FIELD, XML_NAMESPACE_MEMBER_FIELD)); + INTEGER_MEMBER_FIELD, XML_NAMESPACE_MEMBER_FIELD)); private final String stringMember; @@ -76,7 +77,7 @@ private TestXmlNamespaceRequest(BuilderImpl builder) { /** * Returns the value of the StringMember property for this object. - * + * * @return The value of the StringMember property for this object. */ public final String stringMember() { @@ -85,7 +86,7 @@ public final String stringMember() { /** * Returns the value of the IntegerMember property for this object. - * + * * @return The value of the IntegerMember property for this object. */ public final Integer integerMember() { @@ -94,7 +95,7 @@ public final Integer integerMember() { /** * Returns the value of the XmlNamespaceMember property for this object. - * + * * @return The value of the XmlNamespaceMember property for this object. 
*/ public final XmlNamespaceMember xmlNamespaceMember() { @@ -142,7 +143,7 @@ public final boolean equalsBySdkFields(Object obj) { } TestXmlNamespaceRequest other = (TestXmlNamespaceRequest) obj; return Objects.equals(stringMember(), other.stringMember()) && Objects.equals(integerMember(), other.integerMember()) - && Objects.equals(xmlNamespaceMember(), other.xmlNamespaceMember()); + && Objects.equals(xmlNamespaceMember(), other.xmlNamespaceMember()); } /** @@ -152,19 +153,19 @@ public final boolean equalsBySdkFields(Object obj) { @Override public final String toString() { return ToString.builder("TestXmlNamespaceRequest").add("StringMember", stringMember()) - .add("IntegerMember", integerMember()).add("XmlNamespaceMember", xmlNamespaceMember()).build(); + .add("IntegerMember", integerMember()).add("XmlNamespaceMember", xmlNamespaceMember()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "stringMember": - return Optional.ofNullable(clazz.cast(stringMember())); - case "integerMember": - return Optional.ofNullable(clazz.cast(integerMember())); - case "xmlNamespaceMember": - return Optional.ofNullable(clazz.cast(xmlNamespaceMember())); - default: - return Optional.empty(); + case "stringMember": + return Optional.ofNullable(clazz.cast(stringMember())); + case "integerMember": + return Optional.ofNullable(clazz.cast(integerMember())); + case "xmlNamespaceMember": + return Optional.ofNullable(clazz.cast(xmlNamespaceMember())); + default: + return Optional.empty(); } } @@ -217,7 +218,7 @@ public interface Builder extends ProtocolRestXmlRequest.Builder, SdkPojo, Copyab * * When the {@link Consumer} completes, {@link XmlNamespaceMember.Builder#build()} is called immediately and its * result is passed to {@link #xmlNamespaceMember(XmlNamespaceMember)}. 
- * + * * @param xmlNamespaceMember * a consumer that will call methods on {@link XmlNamespaceMember.Builder} * @return Returns a reference to this object so that method calls can be chained together. @@ -255,44 +256,47 @@ public final String getStringMember() { return stringMember; } - @Override - public final Builder stringMember(String stringMember) { + public final void setStringMember(String stringMember) { this.stringMember = stringMember; - return this; } - public final void setStringMember(String stringMember) { + @Override + @Transient + public final Builder stringMember(String stringMember) { this.stringMember = stringMember; + return this; } public final Integer getIntegerMember() { return integerMember; } - @Override - public final Builder integerMember(Integer integerMember) { + public final void setIntegerMember(Integer integerMember) { this.integerMember = integerMember; - return this; } - public final void setIntegerMember(Integer integerMember) { + @Override + @Transient + public final Builder integerMember(Integer integerMember) { this.integerMember = integerMember; + return this; } public final XmlNamespaceMember.Builder getXmlNamespaceMember() { return xmlNamespaceMember != null ? xmlNamespaceMember.toBuilder() : null; } + public final void setXmlNamespaceMember(XmlNamespaceMember.BuilderImpl xmlNamespaceMember) { + this.xmlNamespaceMember = xmlNamespaceMember != null ? xmlNamespaceMember.build() : null; + } + @Override + @Transient public final Builder xmlNamespaceMember(XmlNamespaceMember xmlNamespaceMember) { this.xmlNamespaceMember = xmlNamespaceMember; return this; } - public final void setXmlNamespaceMember(XmlNamespaceMember.BuilderImpl xmlNamespaceMember) { - this.xmlNamespaceMember = xmlNamespaceMember != null ? 
xmlNamespaceMember.build() : null; - } - @Override public Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration) { super.overrideConfiguration(overrideConfiguration); @@ -316,4 +320,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespaceresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespaceresponse.java index d6792f9109c2..16d71e9765ca 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespaceresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespaceresponse.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.protocolrestxml.model; +import java.beans.Transient; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -24,41 +25,41 @@ */ @Generated("software.amazon.awssdk:codegen") public final class TestXmlNamespaceResponse extends ProtocolRestXmlResponse implements - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField STRING_MEMBER_FIELD = SdkField - . builder(MarshallingType.STRING) - .memberName("stringMember") - .getter(getter(TestXmlNamespaceResponse::stringMember)) - .setter(setter(Builder::stringMember)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("stringMember") - .unmarshallLocationName("stringMember").build()).build(); + . builder(MarshallingType.STRING) + .memberName("stringMember") + .getter(getter(TestXmlNamespaceResponse::stringMember)) + .setter(setter(Builder::stringMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("stringMember") + .unmarshallLocationName("stringMember").build()).build(); private static final SdkField INTEGER_MEMBER_FIELD = SdkField - . 
builder(MarshallingType.INTEGER) - .memberName("integerMember") - .getter(getter(TestXmlNamespaceResponse::integerMember)) - .setter(setter(Builder::integerMember)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("integerMember") - .unmarshallLocationName("integerMember").build()).build(); + . builder(MarshallingType.INTEGER) + .memberName("integerMember") + .getter(getter(TestXmlNamespaceResponse::integerMember)) + .setter(setter(Builder::integerMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("integerMember") + .unmarshallLocationName("integerMember").build()).build(); private static final SdkField XML_NAMESPACE_MEMBER_FIELD = SdkField - . builder(MarshallingType.SDK_POJO) - .memberName("xmlNamespaceMember") - .getter(getter(TestXmlNamespaceResponse::xmlNamespaceMember)) - .setter(setter(Builder::xmlNamespaceMember)) - .constructor(XmlNamespaceMember::builder) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("xmlNamespaceMember") - .unmarshallLocationName("xmlNamespaceMember").build(), - XmlAttributesTrait.create( - Pair.of("xmlns:foo", - XmlAttributesTrait.AttributeAccessors.builder().attributeGetter((ignore) -> "http://bar") - .build()), - Pair.of("foo:type", - XmlAttributesTrait.AttributeAccessors.builder() - .attributeGetter(t -> ((XmlNamespaceMember) t).type()).build()))).build(); + . 
builder(MarshallingType.SDK_POJO) + .memberName("xmlNamespaceMember") + .getter(getter(TestXmlNamespaceResponse::xmlNamespaceMember)) + .setter(setter(Builder::xmlNamespaceMember)) + .constructor(XmlNamespaceMember::builder) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("xmlNamespaceMember") + .unmarshallLocationName("xmlNamespaceMember").build(), + XmlAttributesTrait.create( + Pair.of("xmlns:foo", + XmlAttributesTrait.AttributeAccessors.builder().attributeGetter((ignore) -> "http://bar") + .build()), + Pair.of("foo:type", + XmlAttributesTrait.AttributeAccessors.builder() + .attributeGetter(t -> ((XmlNamespaceMember) t).type()).build()))).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(STRING_MEMBER_FIELD, - INTEGER_MEMBER_FIELD, XML_NAMESPACE_MEMBER_FIELD)); + INTEGER_MEMBER_FIELD, XML_NAMESPACE_MEMBER_FIELD)); private final String stringMember; @@ -75,7 +76,7 @@ private TestXmlNamespaceResponse(BuilderImpl builder) { /** * Returns the value of the StringMember property for this object. - * + * * @return The value of the StringMember property for this object. */ public final String stringMember() { @@ -84,7 +85,7 @@ public final String stringMember() { /** * Returns the value of the IntegerMember property for this object. - * + * * @return The value of the IntegerMember property for this object. */ public final Integer integerMember() { @@ -93,7 +94,7 @@ public final Integer integerMember() { /** * Returns the value of the XmlNamespaceMember property for this object. - * + * * @return The value of the XmlNamespaceMember property for this object. 
*/ public final XmlNamespaceMember xmlNamespaceMember() { @@ -141,7 +142,7 @@ public final boolean equalsBySdkFields(Object obj) { } TestXmlNamespaceResponse other = (TestXmlNamespaceResponse) obj; return Objects.equals(stringMember(), other.stringMember()) && Objects.equals(integerMember(), other.integerMember()) - && Objects.equals(xmlNamespaceMember(), other.xmlNamespaceMember()); + && Objects.equals(xmlNamespaceMember(), other.xmlNamespaceMember()); } /** @@ -151,19 +152,19 @@ public final boolean equalsBySdkFields(Object obj) { @Override public final String toString() { return ToString.builder("TestXmlNamespaceResponse").add("StringMember", stringMember()) - .add("IntegerMember", integerMember()).add("XmlNamespaceMember", xmlNamespaceMember()).build(); + .add("IntegerMember", integerMember()).add("XmlNamespaceMember", xmlNamespaceMember()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "stringMember": - return Optional.ofNullable(clazz.cast(stringMember())); - case "integerMember": - return Optional.ofNullable(clazz.cast(integerMember())); - case "xmlNamespaceMember": - return Optional.ofNullable(clazz.cast(xmlNamespaceMember())); - default: - return Optional.empty(); + case "stringMember": + return Optional.ofNullable(clazz.cast(stringMember())); + case "integerMember": + return Optional.ofNullable(clazz.cast(integerMember())); + case "xmlNamespaceMember": + return Optional.ofNullable(clazz.cast(xmlNamespaceMember())); + default: + return Optional.empty(); } } @@ -216,7 +217,7 @@ public interface Builder extends ProtocolRestXmlResponse.Builder, SdkPojo, Copya * * When the {@link Consumer} completes, {@link XmlNamespaceMember.Builder#build()} is called immediately and its * result is passed to {@link #xmlNamespaceMember(XmlNamespaceMember)}. 
- * + * * @param xmlNamespaceMember * a consumer that will call methods on {@link XmlNamespaceMember.Builder} * @return Returns a reference to this object so that method calls can be chained together. @@ -248,44 +249,47 @@ public final String getStringMember() { return stringMember; } - @Override - public final Builder stringMember(String stringMember) { + public final void setStringMember(String stringMember) { this.stringMember = stringMember; - return this; } - public final void setStringMember(String stringMember) { + @Override + @Transient + public final Builder stringMember(String stringMember) { this.stringMember = stringMember; + return this; } public final Integer getIntegerMember() { return integerMember; } - @Override - public final Builder integerMember(Integer integerMember) { + public final void setIntegerMember(Integer integerMember) { this.integerMember = integerMember; - return this; } - public final void setIntegerMember(Integer integerMember) { + @Override + @Transient + public final Builder integerMember(Integer integerMember) { this.integerMember = integerMember; + return this; } public final XmlNamespaceMember.Builder getXmlNamespaceMember() { return xmlNamespaceMember != null ? xmlNamespaceMember.toBuilder() : null; } + public final void setXmlNamespaceMember(XmlNamespaceMember.BuilderImpl xmlNamespaceMember) { + this.xmlNamespaceMember = xmlNamespaceMember != null ? xmlNamespaceMember.build() : null; + } + @Override + @Transient public final Builder xmlNamespaceMember(XmlNamespaceMember xmlNamespaceMember) { this.xmlNamespaceMember = xmlNamespaceMember; return this; } - public final void setXmlNamespaceMember(XmlNamespaceMember.BuilderImpl xmlNamespaceMember) { - this.xmlNamespaceMember = xmlNamespaceMember != null ? 
xmlNamespaceMember.build() : null; - } - @Override public TestXmlNamespaceResponse build() { return new TestXmlNamespaceResponse(this); @@ -297,4 +301,3 @@ public List> sdkFields() { } } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/xmlnamespacemember.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/xmlnamespacemember.java index 16392316565e..f6ee31ea8d47 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/xmlnamespacemember.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/xmlnamespacemember.java @@ -1,5 +1,6 @@ package software.amazon.awssdk.services.protocolrestxml.model; +import java.beans.Transient; import java.io.Serializable; import java.util.Arrays; import java.util.Collections; @@ -23,25 +24,25 @@ */ @Generated("software.amazon.awssdk:codegen") public final class XmlNamespaceMember implements SdkPojo, Serializable, - ToCopyableBuilder { + ToCopyableBuilder { private static final SdkField TYPE_FIELD = SdkField - . builder(MarshallingType.STRING) - .memberName("Type") - .getter(getter(XmlNamespaceMember::type)) - .setter(setter(Builder::type)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("foo:type") - .unmarshallLocationName("foo:type").build(), XmlAttributeTrait.create()).build(); + . builder(MarshallingType.STRING) + .memberName("Type") + .getter(getter(XmlNamespaceMember::type)) + .setter(setter(Builder::type)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("foo:type") + .unmarshallLocationName("foo:type").build(), XmlAttributeTrait.create()).build(); private static final SdkField STRING_MEMBER_FIELD = SdkField - . 
builder(MarshallingType.STRING) - .memberName("stringMember") - .getter(getter(XmlNamespaceMember::stringMember)) - .setter(setter(Builder::stringMember)) - .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("stringMember") - .unmarshallLocationName("stringMember").build()).build(); + . builder(MarshallingType.STRING) + .memberName("stringMember") + .getter(getter(XmlNamespaceMember::stringMember)) + .setter(setter(Builder::stringMember)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("stringMember") + .unmarshallLocationName("stringMember").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(TYPE_FIELD, - STRING_MEMBER_FIELD)); + STRING_MEMBER_FIELD)); private static final long serialVersionUID = 1L; @@ -56,7 +57,7 @@ private XmlNamespaceMember(BuilderImpl builder) { /** * Returns the value of the Type property for this object. - * + * * @return The value of the Type property for this object. */ public final String type() { @@ -65,7 +66,7 @@ public final String type() { /** * Returns the value of the StringMember property for this object. - * + * * @return The value of the StringMember property for this object. 
*/ public final String stringMember() { @@ -124,12 +125,12 @@ public final String toString() { public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { - case "Type": - return Optional.ofNullable(clazz.cast(type())); - case "stringMember": - return Optional.ofNullable(clazz.cast(stringMember())); - default: - return Optional.empty(); + case "Type": + return Optional.ofNullable(clazz.cast(type())); + case "stringMember": + return Optional.ofNullable(clazz.cast(stringMember())); + default: + return Optional.empty(); } } @@ -183,28 +184,30 @@ public final String getType() { return type; } - @Override - public final Builder type(String type) { + public final void setType(String type) { this.type = type; - return this; } - public final void setType(String type) { + @Override + @Transient + public final Builder type(String type) { this.type = type; + return this; } public final String getStringMember() { return stringMember; } - @Override - public final Builder stringMember(String stringMember) { + public final void setStringMember(String stringMember) { this.stringMember = stringMember; - return this; } - public final void setStringMember(String stringMember) { + @Override + @Transient + public final Builder stringMember(String stringMember) { this.stringMember = stringMember; + return this; } @Override @@ -218,4 +221,3 @@ public List> sdkFields() { } } } - diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index a6ac23d52a61..df91fc2cf157 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 16a0aefa177b..65abf3fdb135 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 @@ -74,4 +74,4 @@
- \ No newline at end of file + diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 2403c279fc79..9cafe97c0a29 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT auth @@ -63,8 +63,9 @@ ${awsjavasdk.version} - com.fasterxml.jackson.core - jackson-databind + software.amazon.awssdk + json-utils + ${awsjavasdk.version} software.amazon.eventstream diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProvider.java index f2e1aa03d600..b67c1d5d91b3 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProvider.java @@ -15,15 +15,15 @@ package software.amazon.awssdk.auth.credentials; -import com.fasterxml.jackson.databind.JsonMappingException; -import com.fasterxml.jackson.databind.JsonNode; import java.io.IOException; import java.time.Duration; import java.time.Instant; +import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.util.json.JacksonUtils; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; import software.amazon.awssdk.regions.util.HttpResourcesUtils; import software.amazon.awssdk.regions.util.ResourcesEndpointProvider; import software.amazon.awssdk.utils.ComparableUtils; @@ -40,6 +40,11 @@ */ @SdkProtectedApi public abstract class HttpCredentialsProvider implements AwsCredentialsProvider, SdkAutoCloseable { + private static final JsonNodeParser SENSITIVE_PARSER = + JsonNodeParser.builder() + .removeErrorLocations(true) + .build(); + private final Optional> credentialsCache; 
protected HttpCredentialsProvider(BuilderImpl builder) { @@ -73,7 +78,7 @@ private RefreshResult refreshCredentials() { try { String credentialsResponse = HttpResourcesUtils.instance().readResource(getCredentialsEndpointProvider()); - JsonNode node = JacksonUtils.sensitiveJsonNodeOf(credentialsResponse); + Map node = SENSITIVE_PARSER.parse(credentialsResponse).asObject(); JsonNode accessKey = node.get("AccessKeyId"); JsonNode secretKey = node.get("SecretAccessKey"); JsonNode token = node.get("Token"); @@ -83,8 +88,8 @@ private RefreshResult refreshCredentials() { Validate.notNull(secretKey, "Failed to load secret key."); AwsCredentials credentials = - token == null ? AwsBasicCredentials.create(accessKey.asText(), secretKey.asText()) - : AwsSessionCredentials.create(accessKey.asText(), secretKey.asText(), token.asText()); + token == null ? AwsBasicCredentials.create(accessKey.text(), secretKey.text()) + : AwsSessionCredentials.create(accessKey.text(), secretKey.text(), token.text()); Instant expiration = getExpiration(expirationNode).orElse(null); if (expiration != null && Instant.now().isAfter(expiration)) { @@ -98,11 +103,6 @@ private RefreshResult refreshCredentials() { .build(); } catch (SdkClientException e) { throw e; - } catch (JsonMappingException e) { - throw SdkClientException.builder() - .message("Unable to parse response returned from service endpoint.") - .cause(e) - .build(); } catch (RuntimeException | IOException e) { throw SdkClientException.builder() .message("Unable to load credentials from service endpoint.") @@ -114,7 +114,7 @@ private RefreshResult refreshCredentials() { private Optional getExpiration(JsonNode expirationNode) { return Optional.ofNullable(expirationNode).map(node -> { // Convert the expirationNode string to ISO-8601 format. 
- String expirationValue = node.asText().replaceAll("\\+0000$", "Z"); + String expirationValue = node.text().replaceAll("\\+0000$", "Z"); try { return DateUtils.parseIso8601Date(expirationValue); diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProvider.java index bc424ed05d99..56bfdb2b03ba 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProvider.java @@ -20,10 +20,11 @@ import java.util.HashMap; import java.util.Map; import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.auth.credentials.internal.Ec2MetadataConfigProvider; import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.exception.SdkServiceException; import software.amazon.awssdk.core.util.SdkUserAgent; -import software.amazon.awssdk.regions.internal.util.EC2MetadataUtils; import software.amazon.awssdk.regions.util.HttpResourcesUtils; import software.amazon.awssdk.regions.util.ResourcesEndpointProvider; import software.amazon.awssdk.utils.ToString; @@ -41,11 +42,15 @@ public final class InstanceProfileCredentialsProvider extends HttpCredentialsPro private static final String SECURITY_CREDENTIALS_RESOURCE = "/latest/meta-data/iam/security-credentials/"; + private final String endpoint; + private final Ec2MetadataConfigProvider configProvider = Ec2MetadataConfigProvider.builder().build(); + /** * @see #builder() */ private InstanceProfileCredentialsProvider(BuilderImpl builder) { super(builder); + this.endpoint = builder.endpoint; } /** @@ -66,7 +71,7 @@ public static InstanceProfileCredentialsProvider create() { @Override protected ResourcesEndpointProvider 
getCredentialsEndpointProvider() { - return new InstanceProviderCredentialsEndpointProvider(getToken()); + return new InstanceProviderCredentialsEndpointProvider(getImdsEndpoint(), getToken()); } @Override @@ -80,7 +85,24 @@ public String toString() { } private String getToken() { - return EC2MetadataUtils.getToken(); + try { + return HttpResourcesUtils.instance() + .readResource(new TokenEndpointProvider(getImdsEndpoint()), "PUT"); + } catch (Exception e) { + + boolean is400ServiceException = e instanceof SdkServiceException + && ((SdkServiceException) e).statusCode() == 400; + + // metadata resolution must not continue to the token-less flow for a 400 + if (is400ServiceException) { + throw SdkClientException.builder() + .message("Unable to fetch metadata token") + .cause(e) + .build(); + } + + return null; + } } private static ResourcesEndpointProvider includeTokenHeader(ResourcesEndpointProvider provider, String token) { @@ -99,18 +121,26 @@ public Map headers() { }; } + private String getImdsEndpoint() { + if (endpoint != null) { + return endpoint; + } + + return configProvider.getEndpoint(); + } + private static final class InstanceProviderCredentialsEndpointProvider implements ResourcesEndpointProvider { + private final String imdsEndpoint; private final String metadataToken; - private InstanceProviderCredentialsEndpointProvider(String metadataToken) { + private InstanceProviderCredentialsEndpointProvider(String imdsEndpoint, String metadataToken) { + this.imdsEndpoint = imdsEndpoint; this.metadataToken = metadataToken; } @Override public URI endpoint() throws IOException { - String host = SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.getStringValueOrThrow(); - - URI endpoint = URI.create(host + SECURITY_CREDENTIALS_RESOURCE); + URI endpoint = URI.create(imdsEndpoint + SECURITY_CREDENTIALS_RESOURCE); ResourcesEndpointProvider endpointProvider = () -> endpoint; if (metadataToken != null) { @@ -124,7 +154,7 @@ public URI endpoint() throws IOException { 
throw SdkClientException.builder().message("Unable to load credentials path").build(); } - return URI.create(host + SECURITY_CREDENTIALS_RESOURCE + securityCredentials[0]); + return URI.create(imdsEndpoint + SECURITY_CREDENTIALS_RESOURCE + securityCredentials[0]); } @Override @@ -142,12 +172,46 @@ public Map headers() { } } + private static final class TokenEndpointProvider implements ResourcesEndpointProvider { + private static final String TOKEN_RESOURCE_PATH = "/latest/api/token"; + private static final String EC2_METADATA_TOKEN_TTL_HEADER = "x-aws-ec2-metadata-token-ttl-seconds"; + private static final String DEFAULT_TOKEN_TTL = "21600"; + + private final String host; + + private TokenEndpointProvider(String host) { + this.host = host; + } + + @Override + public URI endpoint() { + String finalHost = host; + if (finalHost.endsWith("/")) { + finalHost = finalHost.substring(0, finalHost.length() - 1); + } + return URI.create(finalHost + TOKEN_RESOURCE_PATH); + } + + @Override + public Map headers() { + Map requestHeaders = new HashMap<>(); + requestHeaders.put("User-Agent", SdkUserAgent.create().userAgent()); + requestHeaders.put("Accept", "*/*"); + requestHeaders.put("Connection", "keep-alive"); + requestHeaders.put(EC2_METADATA_TOKEN_TTL_HEADER, DEFAULT_TOKEN_TTL); + + return requestHeaders; + } + } + /** * A builder for creating a custom a {@link InstanceProfileCredentialsProvider}. */ public interface Builder extends HttpCredentialsProvider.Builder { + Builder endpoint(String endpoint); + /** * Build a {@link InstanceProfileCredentialsProvider} from the provided configuration. 
*/ @@ -159,10 +223,18 @@ private static final class BuilderImpl extends HttpCredentialsProvider.BuilderImpl implements Builder { + private String endpoint; + private BuilderImpl() { super.asyncThreadName("instance-profile-credentials-provider"); } + @Override + public Builder endpoint(String endpoint) { + this.endpoint = endpoint; + return this; + } + @Override public InstanceProfileCredentialsProvider build() { return new InstanceProfileCredentialsProvider(this); diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProvider.java index fac87fbb0e69..e45f556dc7a9 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProvider.java @@ -15,7 +15,6 @@ package software.amazon.awssdk.auth.credentials; -import com.fasterxml.jackson.databind.JsonNode; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -25,11 +24,8 @@ import java.util.Collections; import java.util.List; import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; -import software.amazon.awssdk.core.util.json.JacksonUtils; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; import software.amazon.awssdk.utils.DateUtils; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.Platform; @@ -58,6 +54,10 @@ */ @SdkPublicApi public final class ProcessCredentialsProvider implements AwsCredentialsProvider { + private 
static final JsonNodeParser PARSER = JsonNodeParser.builder() + .removeErrorLocations(true) + .build(); + private final List command; private final Duration credentialRefreshThreshold; private final long processOutputLimit; @@ -129,14 +129,14 @@ private RefreshResult refreshCredentials() { * Parse the output from the credentials process. */ private JsonNode parseProcessOutput(String processOutput) { - JsonNode credentialsJson = JacksonUtils.sensitiveJsonNodeOf(processOutput); + JsonNode credentialsJson = PARSER.parse(processOutput); if (!credentialsJson.isObject()) { throw new IllegalStateException("Process did not return a JSON object."); } - JsonNode version = credentialsJson.get("Version"); - if (version == null || !version.isInt() || version.asInt() != 1) { + JsonNode version = credentialsJson.field("Version").orElse(null); + if (version == null || !version.isNumber() || !version.asNumber().equals("1")) { throw new IllegalStateException("Unsupported credential version: " + version); } return credentialsJson; @@ -174,21 +174,10 @@ private Instant credentialExpirationTime(JsonNode credentialsJson) { } /** - * Get a textual value from a json object, throwing an exception if the node is missing or not textual. + * Get a textual value from a json object. 
*/ private String getText(JsonNode jsonObject, String nodeName) { - JsonNode subNode = jsonObject.get(nodeName); - - if (subNode == null) { - return null; - } - - if (!subNode.isTextual()) { - throw new IllegalStateException(nodeName + " from credential process should be textual, but was " + - subNode.getNodeType()); - } - - return subNode.asText(); + return jsonObject.field(nodeName).map(JsonNode::text).orElse(null); } /** diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProvider.java index 1a4dd42e870d..6eda75c94a70 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProvider.java @@ -69,7 +69,8 @@ private ProfileCredentialsProvider(BuilderImpl builder) { ProfileFile finalProfileFile = profileFile; credentialsProvider = profileFile.profile(profileName) - .flatMap(p -> new ProfileCredentialsUtils(p, finalProfileFile::profile).credentialsProvider()) + .flatMap(p -> new ProfileCredentialsUtils(finalProfileFile, p, finalProfileFile::profile) + .credentialsProvider()) .orElseThrow(() -> { String errorMessage = String.format("Profile file contained no credentials for " + "profile '%s': %s", finalProfileName, finalProfileFile); diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProvider.java new file mode 100644 index 000000000000..c3b3822e75a6 --- /dev/null +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProvider.java @@ -0,0 +1,161 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.credentials.internal; + +import java.util.Optional; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.profiles.ProfileProperty; + +@SdkInternalApi +// TODO: Remove or consolidate this class with the one from the regions module. +// There's currently no good way for both auth and regions to share the same +// class since there's no suitable common dependency between the two where this +// can live. Ideally, we can do this when the EC2MetadataUtils is replaced with +// the IMDS client. +public final class Ec2MetadataConfigProvider { + /** Default IPv4 endpoint for the Amazon EC2 Instance Metadata Service. */ + private static final String EC2_METADATA_SERVICE_URL_IPV4 = "http://169.254.169.254"; + + /** Default IPv6 endpoint for the Amazon EC2 Instance Metadata Service. 
*/ + private static final String EC2_METADATA_SERVICE_URL_IPV6 = "http://[fd00:ec2::254]"; + + private final Supplier profileFile; + private final String profileName; + + private Ec2MetadataConfigProvider(Builder builder) { + this.profileFile = builder.profileFile; + this.profileName = builder.profileName; + } + + public enum EndpointMode { + IPV4, + IPV6, + ; + + public static EndpointMode fromValue(String s) { + if (s == null) { + return null; + } + + for (EndpointMode value : EndpointMode.values()) { + if (value.name().equalsIgnoreCase(s)) { + return value; + } + } + + throw new IllegalArgumentException("Unrecognized value for endpoint mode: " + s); + } + } + + public String getEndpoint() { + String endpointOverride = getEndpointOverride(); + if (endpointOverride != null) { + return endpointOverride; + } + + EndpointMode endpointMode = getEndpointMode(); + switch (endpointMode) { + case IPV4: + return EC2_METADATA_SERVICE_URL_IPV4; + case IPV6: + return EC2_METADATA_SERVICE_URL_IPV6; + default: + throw SdkClientException.create("Unknown endpoint mode: " + endpointMode); + } + } + + public EndpointMode getEndpointMode() { + Optional endpointMode = SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.getNonDefaultStringValue(); + if (endpointMode.isPresent()) { + return EndpointMode.fromValue(endpointMode.get()); + } + + return configFileEndpointMode().orElseGet(() -> + EndpointMode.fromValue(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.defaultValue())); + } + + public String getEndpointOverride() { + Optional endpointOverride = SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.getNonDefaultStringValue(); + if (endpointOverride.isPresent()) { + return endpointOverride.get(); + } + + Optional configFileValue = configFileEndpointOverride(); + + return configFileValue.orElse(null); + } + + public static Builder builder() { + return new Builder(); + } + + private Optional configFileEndpointMode() { + return resolveProfile().flatMap(p -> 
p.property(ProfileProperty.EC2_METADATA_SERVICE_ENDPOINT_MODE)) + .map(EndpointMode::fromValue); + } + + private Optional configFileEndpointOverride() { + return resolveProfile().flatMap(p -> p.property(ProfileProperty.EC2_METADATA_SERVICE_ENDPOINT)); + } + + private Optional resolveProfile() { + ProfileFile profileFileToUse = resolveProfileFile(); + String profileNameToUse = resolveProfileName(); + + return profileFileToUse.profile(profileNameToUse); + } + + private ProfileFile resolveProfileFile() { + if (profileFile != null) { + return profileFile.get(); + } + + return ProfileFile.defaultProfileFile(); + } + + private String resolveProfileName() { + if (profileName != null) { + return profileName; + } + + return ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow(); + } + + public static class Builder { + private Supplier profileFile; + private String profileName; + + public Builder profileFile(Supplier profileFile) { + this.profileFile = profileFile; + return this; + } + + public Builder profileName(String profileName) { + this.profileName = profileName; + return this; + } + + public Ec2MetadataConfigProvider build() { + return new Ec2MetadataConfigProvider(this); + } + } +} diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java index 202b0fbad000..7745613e1dd6 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java @@ -40,6 +40,7 @@ import software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider; import software.amazon.awssdk.core.internal.util.ClassLoaderHelper; import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.profiles.ProfileFile; import 
software.amazon.awssdk.profiles.ProfileProperty; import software.amazon.awssdk.utils.SdkAutoCloseable; import software.amazon.awssdk.utils.Validate; @@ -54,6 +55,10 @@ public final class ProfileCredentialsUtils { private static final String SSO_PROFILE_CREDENTIALS_PROVIDER_FACTORY = "software.amazon.awssdk.services.sso.auth.SsoProfileCredentialsProviderFactory"; + /** + * The profile file containing {@code profile}. + */ + private final ProfileFile profileFile; private final Profile profile; /** @@ -74,7 +79,10 @@ public final class ProfileCredentialsUtils { */ private final Function> credentialsSourceResolver; - public ProfileCredentialsUtils(Profile profile, Function> credentialsSourceResolver) { + public ProfileCredentialsUtils(ProfileFile profileFile, + Profile profile, + Function> credentialsSourceResolver) { + this.profileFile = Validate.paramNotNull(profileFile, "profileFile"); this.profile = Validate.paramNotNull(profile, "profile"); this.name = profile.name(); this.properties = profile.properties(); @@ -215,7 +223,7 @@ private AwsCredentialsProvider roleAndSourceProfileBasedProfileCredentialsProvid children.add(name); AwsCredentialsProvider sourceCredentialsProvider = credentialsSourceResolver.apply(properties.get(ProfileProperty.SOURCE_PROFILE)) - .flatMap(p -> new ProfileCredentialsUtils(p, credentialsSourceResolver) + .flatMap(p -> new ProfileCredentialsUtils(profileFile, p, credentialsSourceResolver) .credentialsProvider(children)) .orElseThrow(this::noSourceCredentialsException); @@ -239,7 +247,15 @@ private AwsCredentialsProvider credentialSourceCredentialProvider(CredentialSour case ECS_CONTAINER: return ContainerCredentialsProvider.builder().build(); case EC2_INSTANCE_METADATA: - return InstanceProfileCredentialsProvider.create(); + // The IMDS credentials provider should source the endpoint config properties from the currently active profile + Ec2MetadataConfigProvider configProvider = Ec2MetadataConfigProvider.builder() + .profileFile(() -> 
profileFile) + .profileName(name) + .build(); + + return InstanceProfileCredentialsProvider.builder() + .endpoint(configProvider.getEndpoint()) + .build(); case ENVIRONMENT: return AwsCredentialsProviderChain.builder() .addCredentialsProvider(SystemPropertyCredentialsProvider.create()) diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/SystemSettingsCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/SystemSettingsCredentialsProvider.java index c84cc44c6237..9c4a642c251c 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/SystemSettingsCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/SystemSettingsCredentialsProvider.java @@ -49,7 +49,7 @@ public AwsCredentials resolveCredentials() { String secretKey = trim(loadSetting(SdkSystemSetting.AWS_SECRET_ACCESS_KEY).orElse(null)); String sessionToken = trim(loadSetting(SdkSystemSetting.AWS_SESSION_TOKEN).orElse(null)); - if (StringUtils.isEmpty(accessKey)) { + if (StringUtils.isBlank(accessKey)) { throw SdkClientException.builder() .message(String.format("Unable to load credentials from system settings. Access key must be" + " specified either via environment variable (%s) or system property (%s).", @@ -58,7 +58,7 @@ public AwsCredentials resolveCredentials() { .build(); } - if (StringUtils.isEmpty(secretKey)) { + if (StringUtils.isBlank(secretKey)) { throw SdkClientException.builder() .message(String.format("Unable to load credentials from system settings. Secret key must be" + " specified either via environment variable (%s) or system property (%s).", @@ -67,8 +67,8 @@ public AwsCredentials resolveCredentials() { .build(); } - return sessionToken == null ? AwsBasicCredentials.create(accessKey, secretKey) - : AwsSessionCredentials.create(accessKey, secretKey, sessionToken); + return StringUtils.isBlank(sessionToken) ? 
AwsBasicCredentials.create(accessKey, secretKey) + : AwsSessionCredentials.create(accessKey, secretKey, sessionToken); } /** diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java index 310b60db61ac..46bf5f3a2c49 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; @@ -323,46 +324,51 @@ private String getCanonicalizedHeaderString(Map> canonicali StringBuilder buffer = new StringBuilder(); canonicalizedHeaders.forEach((headerName, headerValues) -> { - for (String headerValue : headerValues) { - appendCompactedString(buffer, headerName); - buffer.append(":"); - if (headerValue != null) { - appendCompactedString(buffer, headerValue); - } - buffer.append("\n"); - } + buffer.append(headerName); + buffer.append(":"); + buffer.append(String.join(",", trimAll(headerValues))); + buffer.append("\n"); }); return buffer.toString(); } /** - * This method appends a string to a string builder and collapses contiguous - * white space is a single space. - * - * This is equivalent to: - * destination.append(source.replaceAll("\\s+", " ")) + * "The Trimall function removes excess white space before and after values, + * and converts sequential spaces to a single space." + *

+ * https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + *

+ * The collapse-whitespace logic is equivalent to: + *

+     *     value.replaceAll("\\s+", " ")
+     * 
* but does not create a Pattern object that needs to compile the match * string; it also prevents us from having to make a Matcher object as well. - * */ - private void appendCompactedString(final StringBuilder destination, final String source) { + private String trimAll(String value) { boolean previousIsWhiteSpace = false; - int length = source.length(); + StringBuilder sb = new StringBuilder(value.length()); - for (int i = 0; i < length; i++) { - char ch = source.charAt(i); + for (int i = 0; i < value.length(); i++) { + char ch = value.charAt(i); if (isWhiteSpace(ch)) { if (previousIsWhiteSpace) { continue; } - destination.append(' '); + sb.append(' '); previousIsWhiteSpace = true; } else { - destination.append(ch); + sb.append(ch); previousIsWhiteSpace = false; } } + + return sb.toString().trim(); + } + + private List trimAll(List values) { + return values.stream().map(this::trimAll).collect(Collectors.toList()); } /** diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderTest.java index e0fc36d0d204..05b4fb6f5885 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderTest.java @@ -23,9 +23,11 @@ import static com.github.tomakehurst.wiremock.client.WireMock.putRequestedFor; import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; - +import com.github.tomakehurst.wiremock.WireMockServer; import com.github.tomakehurst.wiremock.client.WireMock; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; import com.github.tomakehurst.wiremock.junit.WireMockRule; +import com.github.tomakehurst.wiremock.matching.RequestPatternBuilder; 
import java.time.Duration; import java.time.Instant; import org.junit.AfterClass; @@ -209,4 +211,35 @@ public void resolveCredentials_endpointSettingHostNotExists_throws() { provider.resolveCredentials(); } + + @Test + public void resolveCredentials_customProfileFileAndName_usesCorrectEndpoint() { + WireMockServer mockMetadataEndpoint_2 = new WireMockServer(WireMockConfiguration.options().dynamicPort()); + mockMetadataEndpoint_2.start(); + try { + String stubToken = "some-token"; + mockMetadataEndpoint_2.stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withBody(stubToken))); + mockMetadataEndpoint_2.stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + mockMetadataEndpoint_2.stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + String mockServer2Endpoint = "http://localhost:" + mockMetadataEndpoint_2.port(); + + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder() + .endpoint(mockServer2Endpoint) + .build(); + + provider.resolveCredentials(); + + String userAgentHeader = "User-Agent"; + String userAgent = SdkUserAgent.create().userAgent(); + mockMetadataEndpoint_2.verify(putRequestedFor(urlPathEqualTo(TOKEN_RESOURCE_PATH)).withHeader(userAgentHeader, equalTo(userAgent))); + mockMetadataEndpoint_2.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).withHeader(userAgentHeader, equalTo(userAgent))); + mockMetadataEndpoint_2.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).withHeader(userAgentHeader, equalTo(userAgent))); + + // all requests should have gone to the second server, and none to the other one + mockMetadataEndpoint.verify(0, RequestPatternBuilder.allRequests()); + } finally { + mockMetadataEndpoint_2.stop(); + } + } } diff --git 
a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointModeTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointModeTest.java new file mode 100644 index 000000000000..3db2cec09b95 --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointModeTest.java @@ -0,0 +1,201 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.auth.credentials.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; + +@RunWith(Parameterized.class) +public class Ec2MetadataConfigProviderEndpointModeTest { + private static final String TEST_PROFILES_PATH_PREFIX = "/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/"; + private static final EnvironmentVariableHelper ENVIRONMENT_VARIABLE_HELPER = new EnvironmentVariableHelper(); + private static final String CUSTOM_PROFILE = "myprofile"; + + @Parameterized.Parameter + public TestCase testCase; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Parameterized.Parameters(name = "{0}") + public static Iterable testCases() { + return Arrays.asList( + new TestCase().expectedEndpointMode(null).expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV4), + + new TestCase().envEndpointMode("ipv4").expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV4), + new TestCase().envEndpointMode("IPv4").expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV4), + new TestCase().envEndpointMode("ipv6").expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + new TestCase().envEndpointMode("IPv6").expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + new 
TestCase().envEndpointMode("Ipv99").expectedException(IllegalArgumentException.class), + + new TestCase().systemPropertyEndpointMode("ipv4").expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV4), + new TestCase().systemPropertyEndpointMode("IPv4").expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV4), + new TestCase().systemPropertyEndpointMode("ipv6").expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + new TestCase().systemPropertyEndpointMode("IPv6").expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + new TestCase().systemPropertyEndpointMode("Ipv99").expectedException(IllegalArgumentException.class), + + new TestCase().sharedConfigFile(TEST_PROFILES_PATH_PREFIX + "endpoint_mode_ipv6") + .expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + new TestCase().sharedConfigFile(TEST_PROFILES_PATH_PREFIX + "endpoint_mode_invalidValue") + .expectedException(IllegalArgumentException.class), + + // System property takes highest precedence + new TestCase().systemPropertyEndpointMode("ipv6").envEndpointMode("ipv4") + .expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + new TestCase().systemPropertyEndpointMode("ipv6").sharedConfigFile(TEST_PROFILES_PATH_PREFIX + "endpoint_mode_ipv4") + .expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + + // env var has higher precedence than shared config + new TestCase().envEndpointMode("ipv6").sharedConfigFile(TEST_PROFILES_PATH_PREFIX + "endpoint_mode_ipv4") + .expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + + // Test custom profile supplier and custom profile name + new TestCase().sharedConfigFile(TEST_PROFILES_PATH_PREFIX + "endpoint_mode_ipv6_custom_profile") + .customProfileName(CUSTOM_PROFILE).expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6), + new TestCase().customProfileFile(Ec2MetadataConfigProviderEndpointModeTest::customProfileFile) + 
.customProfileName(CUSTOM_PROFILE).expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode.IPV6) + ); + } + + @Before + public void setup() { + ENVIRONMENT_VARIABLE_HELPER.reset(); + System.clearProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.property()); + + if (testCase.envEndpointMode != null) { + ENVIRONMENT_VARIABLE_HELPER.set(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.environmentVariable(), + testCase.envEndpointMode); + } + + if (testCase.systemPropertyEndpointMode != null) { + System.setProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.property(), + testCase.systemPropertyEndpointMode); + } + if (testCase.sharedConfigFile != null) { + ENVIRONMENT_VARIABLE_HELPER.set(ProfileFileSystemSetting.AWS_CONFIG_FILE.environmentVariable(), + getTestFilePath(testCase.sharedConfigFile)); + } + + if (testCase.expectedException != null) { + thrown.expect(testCase.expectedException); + } + } + + @After + public void teardown() { + ENVIRONMENT_VARIABLE_HELPER.reset(); + System.clearProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.property()); + } + + @Test + public void resolvesCorrectEndpointMode() { + Ec2MetadataConfigProvider configProvider = Ec2MetadataConfigProvider.builder() + .profileFile(testCase.customProfileFile) + .profileName(testCase.customProfileName) + .build(); + + assertThat(configProvider.getEndpointMode()).isEqualTo(testCase.expectedEndpointMode); + } + + private static String getTestFilePath(String testFile) { + return Ec2MetadataConfigProviderEndpointModeTest.class.getResource(testFile).getFile(); + } + + private static ProfileFile customProfileFile() { + String content = "[profile myprofile]\n" + + "ec2_metadata_service_endpoint_mode=ipv6\n"; + + return ProfileFile.builder() + .type(ProfileFile.Type.CONFIGURATION) + .content(new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8))) + .build(); + } + + private static class TestCase { + private String envEndpointMode; + private 
String systemPropertyEndpointMode; + + private String sharedConfigFile; + + private Supplier customProfileFile; + + private String customProfileName; + + private Ec2MetadataConfigProvider.EndpointMode expectedEndpointMode; + private Class expectedException; + + public TestCase envEndpointMode(String envEndpointMode) { + this.envEndpointMode = envEndpointMode; + return this; + } + public TestCase systemPropertyEndpointMode(String systemPropertyEndpointMode) { + this.systemPropertyEndpointMode = systemPropertyEndpointMode; + return this; + } + + public TestCase sharedConfigFile(String sharedConfigFile) { + this.sharedConfigFile = sharedConfigFile; + return this; + } + + public TestCase customProfileFile(Supplier customProfileFile) { + this.customProfileFile = customProfileFile; + return this; + } + + private TestCase customProfileName(String customProfileName) { + this.customProfileName = customProfileName; + return this; + } + + public TestCase expectedEndpointMode(Ec2MetadataConfigProvider.EndpointMode expectedEndpointMode) { + this.expectedEndpointMode = expectedEndpointMode; + return this; + } + + public TestCase expectedException(Class expectedException) { + this.expectedException = expectedException; + return this; + } + + @Override + public String toString() { + return "TestCase{" + + "envEndpointMode='" + envEndpointMode + '\'' + + ", systemPropertyEndpointMode='" + systemPropertyEndpointMode + '\'' + + ", sharedConfigFile='" + sharedConfigFile + '\'' + + ", customProfileFile=" + customProfileFile + + ", customProfileName='" + customProfileName + '\'' + + ", expectedEndpointMode=" + expectedEndpointMode + + ", expectedException=" + expectedException + + '}'; + } + } +} diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointOverrideTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointOverrideTest.java new file mode 100644 index 
000000000000..9ecea0fe4def --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointOverrideTest.java @@ -0,0 +1,151 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.credentials.internal; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; +import java.util.Arrays; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; + +@RunWith(Parameterized.class) +public class Ec2MetadataConfigProviderEndpointOverrideTest { + private static final String TEST_PROFILES_PATH_PREFIX = "/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/"; + private static final EnvironmentVariableHelper ENVIRONMENT_VARIABLE_HELPER = new EnvironmentVariableHelper(); + + @Parameterized.Parameter + public TestCase testCase; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Parameterized.Parameters(name = "{0}") + public static Iterable testCases() { + return Arrays.asList( + new TestCase().expectedEndpointOverride(null), + + 
new TestCase().envEndpointOverride("my-custom-imds").expectedEndpointOverride("my-custom-imds"), + + new TestCase().systemPropertyEndpointOverride("my-custom-imds").expectedEndpointOverride("my-custom-imds"), + + new TestCase().sharedConfigFile(TEST_PROFILES_PATH_PREFIX + "endpoint_override") + .expectedEndpointOverride("my-custom-imds-profile"), + + // System property takes highest precedence + new TestCase().systemPropertyEndpointOverride("my-systemprop-endpoint").envEndpointOverride("my-env-endpoint") + .expectedEndpointOverride("my-systemprop-endpoint"), + new TestCase().systemPropertyEndpointOverride("my-systemprop-endpoint").sharedConfigFile(TEST_PROFILES_PATH_PREFIX + "endpoint_override") + .expectedEndpointOverride("my-systemprop-endpoint"), +// + // env var has higher precedence than shared config + new TestCase().envEndpointOverride("my-env-endpoint").sharedConfigFile(TEST_PROFILES_PATH_PREFIX + "endpoint_override") + .expectedEndpointOverride("my-env-endpoint") + + ); + } + + @Before + public void setup() { + if (testCase.envEndpointOverride != null) { + ENVIRONMENT_VARIABLE_HELPER.set(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.environmentVariable(), + testCase.envEndpointOverride); + } + + if (testCase.systemPropertyEndpointOverride != null) { + System.setProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property(), + testCase.systemPropertyEndpointOverride); + } + + if (testCase.sharedConfigFile != null) { + ENVIRONMENT_VARIABLE_HELPER.set(ProfileFileSystemSetting.AWS_CONFIG_FILE.environmentVariable(), + getTestFilePath(testCase.sharedConfigFile)); + } + + if (testCase.expectedException != null) { + thrown.expect(testCase.expectedException); + } + } + + @After + public void teardown() { + ENVIRONMENT_VARIABLE_HELPER.reset(); + System.clearProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property()); + } + + @Test + public void resolvesCorrectEndpointOverride() { + String endpointOverride = 
Ec2MetadataConfigProvider.builder().build().getEndpointOverride(); + + assertThat(endpointOverride, equalTo(testCase.expectedEndpointOverride)); + } + + private static String getTestFilePath(String testFile) { + return Ec2MetadataConfigProviderEndpointOverrideTest.class.getResource(testFile).getFile(); + } + + private static class TestCase { + private String envEndpointOverride; + private String systemPropertyEndpointOverride; + + private String sharedConfigFile; + + private String expectedEndpointOverride; + private Class expectedException; + + public TestCase envEndpointOverride(String envEndpointOverride) { + this.envEndpointOverride = envEndpointOverride; + return this; + } + public TestCase systemPropertyEndpointOverride(String systemPropertyEndpointOverride) { + this.systemPropertyEndpointOverride = systemPropertyEndpointOverride; + return this; + } + + public TestCase sharedConfigFile(String sharedConfigFile) { + this.sharedConfigFile = sharedConfigFile; + return this; + } + + public TestCase expectedEndpointOverride(String expectedEndpointOverride) { + this.expectedEndpointOverride = expectedEndpointOverride; + return this; + } + + public TestCase expectedException(Class expectedException) { + this.expectedException = expectedException; + return this; + } + + @Override + public String toString() { + return "TestCase{" + + "envEndpointOverride='" + envEndpointOverride + '\'' + + ", systemPropertyEndpointOverride='" + systemPropertyEndpointOverride + '\'' + + ", sharedConfigFile='" + sharedConfigFile + '\'' + + ", expectedEndpointOverride='" + expectedEndpointOverride + '\'' + + ", expectedException=" + expectedException + + '}'; + } + } +} diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointResolutionTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointResolutionTest.java new file mode 100644 index 000000000000..6b30ce63fb26 --- 
/dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/Ec2MetadataConfigProviderEndpointResolutionTest.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.credentials.internal; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; +import java.util.Arrays; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import software.amazon.awssdk.core.SdkSystemSetting; + +@RunWith(Parameterized.class) +public class Ec2MetadataConfigProviderEndpointResolutionTest { + private static final String EC2_METADATA_SERVICE_URL_IPV4 = "http://169.254.169.254"; + private static final String EC2_METADATA_SERVICE_URL_IPV6 = "http://[fd00:ec2::254]"; + private static final String ENDPOINT_OVERRIDE = "http://my-custom-endpoint"; + + @Parameterized.Parameter + public TestCase testCase; + + @Parameterized.Parameters + public static Iterable testCases() { + return Arrays.asList( + new TestCase().expectedEndpoint(EC2_METADATA_SERVICE_URL_IPV4), + new TestCase().endpointMode("ipv6").expectedEndpoint(EC2_METADATA_SERVICE_URL_IPV6), + new TestCase().endpointMode("ipv4").expectedEndpoint(EC2_METADATA_SERVICE_URL_IPV4), + + new TestCase().endpointOverride(ENDPOINT_OVERRIDE).expectedEndpoint(ENDPOINT_OVERRIDE), + new 
TestCase().endpointMode("ipv4").endpointOverride(ENDPOINT_OVERRIDE).expectedEndpoint(ENDPOINT_OVERRIDE), + new TestCase().endpointMode("ipv6").endpointOverride(ENDPOINT_OVERRIDE).expectedEndpoint(ENDPOINT_OVERRIDE), + new TestCase().endpointMode("ipv99").endpointOverride(ENDPOINT_OVERRIDE).expectedEndpoint(ENDPOINT_OVERRIDE) + ); + } + + @Before + public void setup() { + System.clearProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property()); + System.clearProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.property()); + + if (testCase.endpointMode != null) { + System.setProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.property(), testCase.endpointMode); + } + + if (testCase.endpointOverride != null) { + System.setProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property(), testCase.endpointOverride); + } + } + + @Test + public void resolvesCorrectEndpoint() { + assertThat(Ec2MetadataConfigProvider.builder().build().getEndpoint(), equalTo(testCase.expectedEndpoint)); + } + + private static class TestCase { + private String endpointMode; + private String endpointOverride; + + private String expectedEndpoint; + + public TestCase endpointMode(String endpointMode) { + this.endpointMode = endpointMode; + return this; + } + + public TestCase endpointOverride(String endpointOverride) { + this.endpointOverride = endpointOverride; + return this; + } + + public TestCase expectedEndpoint(String expectedEndpoint) { + this.expectedEndpoint = expectedEndpoint; + return this; + } + } +} \ No newline at end of file diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/EndpointModeTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/EndpointModeTest.java new file mode 100644 index 000000000000..539666d8227d --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/EndpointModeTest.java @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com, 
Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.credentials.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.auth.credentials.internal.Ec2MetadataConfigProvider.EndpointMode.IPV4; +import static software.amazon.awssdk.auth.credentials.internal.Ec2MetadataConfigProvider.EndpointMode.IPV6; +import org.junit.Test; + +public class EndpointModeTest { + @Test + public void fromString_caseInsensitive() { + assertThat(Ec2MetadataConfigProvider.EndpointMode.fromValue("iPv6")).isEqualTo(IPV6); + assertThat(Ec2MetadataConfigProvider.EndpointMode.fromValue("iPv4")).isEqualTo(IPV4); + } + + @Test + public void fromString_unknownValue_throws() { + assertThatThrownBy(() -> Ec2MetadataConfigProvider.EndpointMode.fromValue("unknown")) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void fromString_nullValue_returnsNull() { + assertThat(Ec2MetadataConfigProvider.EndpointMode.fromValue(null)).isNull(); + } +} diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtilsTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtilsTest.java index eaf9a1f067e7..1f1a820a3c14 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtilsTest.java +++ 
b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtilsTest.java @@ -66,7 +66,7 @@ public void roleProfileCanInheritFromAnotherFile() { ProfileFile configProfile = aggregateFileProfiles(configChild, credentialsSource); Consumer profileValidator = profileFile -> - Assertions.assertThatThrownBy(new ProfileCredentialsUtils(profileFile.profiles().get("child"), + Assertions.assertThatThrownBy(new ProfileCredentialsUtils(profileFile, profileFile.profiles().get("child"), profileFile::profile)::credentialsProvider) .hasMessageContaining("the 'sts' service module must be on the class path"); @@ -79,7 +79,7 @@ public void roleProfileWithMissingSourceThrowsException() { ProfileFile profileFile = configFile("[profile test]\n" + "source_profile=source\n" + "role_arn=arn:aws:iam::123456789012:role/testRole"); - Assertions.assertThatThrownBy(new ProfileCredentialsUtils(profileFile.profile("test") + Assertions.assertThatThrownBy(new ProfileCredentialsUtils(profileFile, profileFile.profile("test") .get(), profileFile::profile)::credentialsProvider) .hasMessageContaining("source profile has no credentials configured."); } @@ -92,7 +92,7 @@ public void roleProfileWithSourceThatHasNoCredentialsThrowsExceptionWhenLoadingC "role_arn=arn:aws:iam::123456789012:role/testRole"); assertThat(profiles.profile("test")).hasValueSatisfying(profile -> { - ProfileCredentialsUtils profileCredentialsUtils = new ProfileCredentialsUtils(profile, profiles::profile); + ProfileCredentialsUtils profileCredentialsUtils = new ProfileCredentialsUtils(profiles, profile, profiles::profile); Assertions.assertThatThrownBy(profileCredentialsUtils::credentialsProvider) .hasMessageContaining("source profile has no credentials configured"); }); @@ -113,7 +113,7 @@ public void profileFileWithStaticCredentialsLoadsCorrectly() { assertThat(profile.property(ProfileProperty.AWS_ACCESS_KEY_ID)).hasValue("defaultAccessKey"); assertThat(profile.toString()).contains("default"); 
assertThat(profile.property(ProfileProperty.REGION)).isNotPresent(); - assertThat(new ProfileCredentialsUtils(profile, profileFile::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { + assertThat(new ProfileCredentialsUtils(profileFile, profile, profileFile::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { assertThat(credentialsProvider.resolveCredentials()).satisfies(credentials -> { assertThat(credentials.accessKeyId()).isEqualTo("defaultAccessKey"); assertThat(credentials.secretAccessKey()).isEqualTo("defaultSecretAccessKey"); @@ -127,7 +127,7 @@ public void profileFileWithSessionCredentialsLoadsCorrectly() { ProfileFile profileFile = allTypesProfile(); assertThat(profileFile.profile("profile-with-session-token")).hasValueSatisfying(profile -> { assertThat(profile.property(ProfileProperty.REGION)).isNotPresent(); - assertThat(new ProfileCredentialsUtils(profile, profileFile::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { + assertThat(new ProfileCredentialsUtils(profileFile, profile, profileFile::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { assertThat(credentialsProvider.resolveCredentials()).satisfies(credentials -> { assertThat(credentials).isInstanceOf(AwsSessionCredentials.class); assertThat(credentials.accessKeyId()).isEqualTo("defaultAccessKey"); @@ -143,7 +143,7 @@ public void profileFileWithProcessCredentialsLoadsCorrectly() { ProfileFile profileFile = allTypesProfile(); assertThat(profileFile.profile("profile-credential-process")).hasValueSatisfying(profile -> { assertThat(profile.property(ProfileProperty.REGION)).isNotPresent(); - assertThat(new ProfileCredentialsUtils(profile, profileFile::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { + assertThat(new ProfileCredentialsUtils(profileFile, profile, profileFile::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { 
assertThat(credentialsProvider.resolveCredentials()).satisfies(credentials -> { assertThat(credentials).isInstanceOf(AwsBasicCredentials.class); assertThat(credentials.accessKeyId()).isEqualTo("defaultAccessKey"); @@ -159,7 +159,7 @@ public void profileFileWithAssumeRoleThrowsExceptionWhenRetrievingCredentialsPro assertThat(profileFile.profile("profile-with-assume-role")).hasValueSatisfying(profile -> { assertThat(profile.property(ProfileProperty.REGION)).isNotPresent(); - ProfileCredentialsUtils profileCredentialsUtils = new ProfileCredentialsUtils(profile, profileFile::profile); + ProfileCredentialsUtils profileCredentialsUtils = new ProfileCredentialsUtils(profileFile, profile, profileFile::profile); Assertions.assertThatThrownBy(profileCredentialsUtils::credentialsProvider).isInstanceOf(IllegalStateException.class); }); } @@ -176,7 +176,7 @@ public void profileFileWithCredentialSourceThrowsExceptionWhenRetrievingCredenti assertThat(profileFile.profile(profileName)).hasValueSatisfying(profile -> { assertThat(profile.property(ProfileProperty.REGION)).isNotPresent(); - ProfileCredentialsUtils profileCredentialsUtils = new ProfileCredentialsUtils(profile, profileFile::profile); + ProfileCredentialsUtils profileCredentialsUtils = new ProfileCredentialsUtils(profileFile, profile, profileFile::profile); Assertions.assertThatThrownBy(profileCredentialsUtils::credentialsProvider).isInstanceOf(IllegalStateException.class); }); }); @@ -199,7 +199,7 @@ public void profileFileWithCircularDependencyThrowsExceptionWhenResolvingCredent "[profile test3]\n" + "source_profile=test2\n" + "role_arn=arn:aws:iam::123456789012:role/testRole3"); - Assertions.assertThatThrownBy(() -> new ProfileCredentialsUtils(configFile.profile("test").get(), configFile::profile) + Assertions.assertThatThrownBy(() -> new ProfileCredentialsUtils(configFile, configFile.profile("test").get(), configFile::profile) .credentialsProvider()) .isInstanceOf(IllegalStateException.class) 
.hasMessageContaining("Invalid profile file: Circular relationship detected with profiles"); @@ -215,7 +215,7 @@ public void profileWithBothCredentialSourceAndSourceProfileThrowsException() { "[profile source]\n" + "aws_access_key_id=defaultAccessKey\n" + "aws_secret_access_key=defaultSecretAccessKey"); - Assertions.assertThatThrownBy(() -> new ProfileCredentialsUtils(configFile.profile("test").get(), configFile::profile) + Assertions.assertThatThrownBy(() -> new ProfileCredentialsUtils(configFile, configFile.profile("test").get(), configFile::profile) .credentialsProvider()) .isInstanceOf(IllegalStateException.class) .hasMessageContaining("Invalid profile file: profile has both source_profile and credential_source."); @@ -226,7 +226,7 @@ public void profileWithInvalidCredentialSourceThrowsException() { ProfileFile configFile = configFile("[profile test]\n" + "credential_source=foobar\n" + "role_arn=arn:aws:iam::123456789012:role/testRole3"); - Assertions.assertThatThrownBy(() -> new ProfileCredentialsUtils(configFile.profile("test").get(), configFile::profile) + Assertions.assertThatThrownBy(() -> new ProfileCredentialsUtils(configFile, configFile.profile("test").get(), configFile::profile) .credentialsProvider()) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("foobar is not a valid credential_source"); diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4SignerTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4SignerTest.java index ef2ee593a995..185ae8e55bae 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4SignerTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4SignerTest.java @@ -156,6 +156,48 @@ public void xAmznTraceId_NotSigned() throws Exception { "Signature=581d0042389009a28d461124138f1fe8eeb8daed87611d2a2b47fd3d68d81d73"); } + /** + * Multi-value headers should be comma separated. 
+ */ + @Test + public void canonicalizedHeaderString_multiValueHeaders_areCommaSeparated() throws Exception { + AwsBasicCredentials credentials = AwsBasicCredentials.create("akid", "skid"); + SdkHttpFullRequest.Builder request = generateBasicRequest(); + request.appendHeader("foo","bar"); + request.appendHeader("foo","baz"); + + SdkHttpFullRequest actual = SignerTestUtils.signRequest(signer, request.build(), credentials, "demo", signingOverrideClock, "us-east-1"); + + // We cannot easily test the canonical header string value, but the below signature asserts that it contains: + // foo:bar,baz + assertThat(actual.firstMatchingHeader("Authorization")) + .hasValue("AWS4-HMAC-SHA256 Credential=akid/19810216/us-east-1/demo/aws4_request, " + + "SignedHeaders=foo;host;x-amz-archive-description;x-amz-date, " + + "Signature=1253bc1751048ea299e688cbe07a2224292e5cc606a079cb40459ad987793c19"); + } + + /** + * Canonical headers should remove excess white space before and after values, and convert sequential spaces to a single + * space. 
+ */ + @Test + public void canonicalizedHeaderString_valuesWithExtraWhitespace_areTrimmed() throws Exception { + AwsBasicCredentials credentials = AwsBasicCredentials.create("akid", "skid"); + SdkHttpFullRequest.Builder request = generateBasicRequest(); + request.putHeader("My-header1"," a b c "); + request.putHeader("My-Header2"," \"a b c\" "); + + SdkHttpFullRequest actual = SignerTestUtils.signRequest(signer, request.build(), credentials, "demo", signingOverrideClock, "us-east-1"); + + // We cannot easily test the canonical header string value, but the below signature asserts that it contains: + // my-header1:a b c + // my-header2:"a b c" + assertThat(actual.firstMatchingHeader("Authorization")) + .hasValue("AWS4-HMAC-SHA256 Credential=akid/19810216/us-east-1/demo/aws4_request, " + + "SignedHeaders=host;my-header1;my-header2;x-amz-archive-description;x-amz-date, " + + "Signature=6d3520e3397e7aba593d8ebd8361fc4405e90aed71bc4c7a09dcacb6f72460b9"); + } + private SdkHttpFullRequest.Builder generateBasicRequest() { return SdkHttpFullRequest.builder() .contentStreamProvider(() -> new ByteArrayInputStream("{\"TableName\": \"foo\"}".getBytes())) diff --git a/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_invalidValue b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_invalidValue new file mode 100644 index 000000000000..6861871e88eb --- /dev/null +++ b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_invalidValue @@ -0,0 +1,2 @@ +[default] +ec2_metadata_service_endpoint_mode=ipv99 \ No newline at end of file diff --git a/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv4 b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv4 
new file mode 100644 index 000000000000..26d61cdc7aa2 --- /dev/null +++ b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv4 @@ -0,0 +1,2 @@ +[default] +ec2_metadata_service_endpoint_mode=ipv4 \ No newline at end of file diff --git a/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv6 b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv6 new file mode 100644 index 000000000000..9a4b2df665a2 --- /dev/null +++ b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv6 @@ -0,0 +1,5 @@ +[default] +ec2_metadata_service_endpoint_mode=ipv6 + +[profile hellocustomprofile] +ec2_metadata_service_endpoint_mode=ipv6 diff --git a/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv6_custom_profile b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv6_custom_profile new file mode 100644 index 000000000000..0a592bfe55fc --- /dev/null +++ b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_ipv6_custom_profile @@ -0,0 +1,2 @@ +[profile myprofile] +ec2_metadata_service_endpoint_mode=ipv6 diff --git a/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_no_value b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_no_value new file mode 100644 index 000000000000..3200812771bb --- /dev/null +++ b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_mode_no_value @@ -0,0 +1,2 @@ +[default] 
+ec2_metadata_service_endpoint_mode= \ No newline at end of file diff --git a/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_override b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_override new file mode 100644 index 000000000000..027d1ac2db6b --- /dev/null +++ b/core/auth/src/test/resources/software/amazon/awssdk/auth/credentials/internal/ec2metadataconfigprovider/endpoint_override @@ -0,0 +1,2 @@ +[default] +ec2_metadata_service_endpoint=my-custom-imds-profile \ No newline at end of file diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index a34bd625516b..334be7b0de66 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT aws-core diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java index 72c1be77f3ad..4d1120a74392 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java @@ -235,6 +235,7 @@ private RetryPolicy resolveAwsRetryPolicy(SdkClientConfiguration config) { RetryMode retryMode = RetryMode.resolver() .profileFile(() -> config.option(SdkClientOption.PROFILE_FILE)) .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) .resolve(); return AwsRetryPolicy.forRetryMode(retryMode); } diff --git a/core/protocols/aws-ion-protocol/pom.xml b/core/json-utils/pom.xml similarity index 60% rename from core/protocols/aws-ion-protocol/pom.xml rename to core/json-utils/pom.xml index 73f19b2eb5d3..157d577e93d1 100644 --- 
a/core/protocols/aws-ion-protocol/pom.xml +++ b/core/json-utils/pom.xml @@ -18,32 +18,32 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - protocols + core software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 - aws-ion-protocol - AWS Java SDK :: Core :: Protocols :: AWS Ion Protocol - The AWS SDK for Java - module holds the classes for AWS Ion protocol - + json-utils + AWS Java SDK :: Core :: Protocols :: Json Utils https://aws.amazon.com/sdkforjava + + + + software.amazon.awssdk + bom-internal + ${project.version} + pom + import + + + + software.amazon.awssdk - aws-core - ${awsjavasdk.version} - - - software.amazon.awssdk - aws-json-protocol - ${awsjavasdk.version} - - - software.amazon.awssdk - sdk-core + utils ${awsjavasdk.version} @@ -53,38 +53,14 @@ software.amazon.awssdk - http-client-spi + third-party-jackson-core ${awsjavasdk.version} - - software.amazon.awssdk - utils - ${awsjavasdk.version} - - - software.amazon.ion - ion-java - - - com.fasterxml.jackson.core - jackson-core - - - software.amazon.awssdk - protocol-core - ${awsjavasdk.version} - test - junit junit test - - org.hamcrest - hamcrest-all - test - org.assertj assertj-core @@ -105,7 +81,7 @@ - software.amazon.awssdk.protocols.ion + software.amazon.awssdk.protocols.jsoncore diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNode.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNode.java new file mode 100644 index 000000000000..ef597c1b742a --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNode.java @@ -0,0 +1,190 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.jsoncore; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.protocols.jsoncore.internal.ObjectJsonNode; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; + +/** + * A node in a JSON document. Either a number, string, boolean, array, object or null. Also can be an embedded object, + * which is a non-standard type used in JSON extensions, like CBOR. + * + *

Created from a JSON document via {@link #parser()} or {@link #parserBuilder()}. + * + *

The type of node can be determined using "is" methods like {@link #isNumber()} and {@link #isString()}. + * Once the type is determined, the value of the node can be extracted via the "as" methods, like {@link #asNumber()} + * and {@link #asString()}. + */ +@SdkProtectedApi +public interface JsonNode { + /** + * Create a {@link JsonNodeParser} for generating a {@link JsonNode} from a JSON document. + */ + static JsonNodeParser parser() { + return JsonNodeParser.create(); + } + + /** + * Create a {@link JsonNodeParser.Builder} for generating a {@link JsonNode} from a JSON document. + */ + static JsonNodeParser.Builder parserBuilder() { + return JsonNodeParser.builder(); + } + + /** + * Return an empty object node. + */ + static JsonNode emptyObjectNode() { + return new ObjectJsonNode(Collections.emptyMap()); + } + + /** + * Returns true if this node represents a JSON number: https://datatracker.ietf.org/doc/html/rfc8259#section-6 + * + * @see #asNumber() + */ + default boolean isNumber() { + return false; + } + + /** + * Returns true if this node represents a JSON string: https://datatracker.ietf.org/doc/html/rfc8259#section-7 + * + * @see #asString() + */ + default boolean isString() { + return false; + } + + /** + * Returns true if this node represents a JSON boolean: https://datatracker.ietf.org/doc/html/rfc8259#section-3 + * + * @see #asBoolean() + */ + default boolean isBoolean() { + return false; + } + + /** + * Returns true if this node represents a JSON null: https://datatracker.ietf.org/doc/html/rfc8259#section-3 + */ + default boolean isNull() { + return false; + } + + /** + * Returns true if this node represents a JSON array: https://datatracker.ietf.org/doc/html/rfc8259#section-5 + * + * @see #asArray() + */ + default boolean isArray() { + return false; + } + + /** + * Returns true if this node represents a JSON object: https://datatracker.ietf.org/doc/html/rfc8259#section-4 + * + * @see #asObject() + */ + default boolean isObject() { + return false; + } 
+ + /** + * Returns true if this node represents a JSON "embedded object". This non-standard type is associated with JSON extensions, + * like CBOR or ION. It allows additional data types to be embedded in a JSON document, like a timestamp or a raw byte array. + * + *

Users who are only concerned with handling JSON can ignore this field. It will only be present when using a custom + * {@link JsonFactory} via {@link JsonNodeParser.Builder#jsonFactory(JsonFactory)}. + * + * @see #asEmbeddedObject() + */ + default boolean isEmbeddedObject() { + return false; + } + + /** + * When {@link #isNumber()} is true, this returns the number associated with this node. This will throw an exception if + * {@link #isNumber()} is false. + * + * @see #text() + */ + String asNumber(); + + /** + * When {@link #isString()}, is true, this returns the string associated with this node. This will throw an exception if + * {@link #isString()} ()} is false. + */ + String asString(); + + /** + * When {@link #isBoolean()} is true, this returns the boolean associated with this node. This will throw an exception if + * {@link #isBoolean()} is false. + */ + boolean asBoolean(); + + /** + * When {@link #isArray()} is true, this returns the array associated with this node. This will throw an exception if + * {@link #isArray()} is false. + */ + List asArray(); + + /** + * When {@link #isObject()} is true, this returns the object associated with this node. This will throw an exception if + * {@link #isObject()} is false. + */ + Map asObject(); + + /** + * When {@link #isEmbeddedObject()} is true, this returns the embedded object associated with this node. This will throw + * an exception if {@link #isEmbeddedObject()} is false. + * + * @see #isEmbeddedObject() + */ + Object asEmbeddedObject(); + + /** + * Visit this node using the provided visitor. + */ + T visit(JsonNodeVisitor visitor); + + /** + * When {@link #isString()}, {@link #isBoolean()}, or {@link #isNumber()} is true, this will return the value of this node + * as a textual string. If this is any other type, this will return null. + */ + String text(); + + /** + * When {@link #isObject()} is true, this will return the result of {@code Optional.ofNullable(asObject().get(child))}. 
If + * this is any other type, this will return {@link Optional#empty()}. + */ + default Optional field(String child) { + return Optional.empty(); + } + + /** + * When {@link #isArray()} is true, this will return the result of {@code asArray().get(child)} if child is within bounds. If + * this is any other type or the child is out of bounds, this will return {@link Optional#empty()}. + */ + default Optional index(int child) { + return Optional.empty(); + } +} diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNodeParser.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNodeParser.java new file mode 100644 index 000000000000..f87100eab1f5 --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNodeParser.java @@ -0,0 +1,231 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.protocols.jsoncore; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.protocols.jsoncore.internal.ArrayJsonNode; +import software.amazon.awssdk.protocols.jsoncore.internal.BooleanJsonNode; +import software.amazon.awssdk.protocols.jsoncore.internal.EmbeddedObjectJsonNode; +import software.amazon.awssdk.protocols.jsoncore.internal.NullJsonNode; +import software.amazon.awssdk.protocols.jsoncore.internal.NumberJsonNode; +import software.amazon.awssdk.protocols.jsoncore.internal.ObjectJsonNode; +import software.amazon.awssdk.protocols.jsoncore.internal.StringJsonNode; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; +import software.amazon.awssdk.thirdparty.jackson.core.JsonParseException; +import software.amazon.awssdk.thirdparty.jackson.core.JsonParser; +import software.amazon.awssdk.thirdparty.jackson.core.JsonToken; +import software.amazon.awssdk.thirdparty.jackson.core.json.JsonReadFeature; + +/** + * Parses an JSON document into a simple DOM-like structure, {@link JsonNode}. + * + *

This is created using {@link #create()} or {@link #builder()}. + */ +@SdkProtectedApi +public final class JsonNodeParser { + /** + * The default {@link JsonFactory} used for {@link #create()} or if a factory is not configured via + * {@link Builder#jsonFactory(JsonFactory)}. + */ + public static final JsonFactory DEFAULT_JSON_FACTORY = + JsonFactory.builder() + .configure(JsonReadFeature.ALLOW_JAVA_COMMENTS, true) + .build(); + + private final boolean removeErrorLocations; + private final JsonFactory jsonFactory; + + private JsonNodeParser(Builder builder) { + this.removeErrorLocations = builder.removeErrorLocations; + this.jsonFactory = builder.jsonFactory; + } + + /** + * Create a parser using the default configuration. + */ + public static JsonNodeParser create() { + return builder().build(); + } + + /** + * Create a parser using custom configuration. + */ + public static JsonNodeParser.Builder builder() { + return new Builder(); + } + + /** + * Parse the provided {@link InputStream} into a {@link JsonNode}. + */ + public JsonNode parse(InputStream content) { + return invokeSafely(() -> { + try (JsonParser parser = jsonFactory.createParser(content) + .configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false)) { + return parse(parser); + } + }); + } + + /** + * Parse the provided {@code byte[]} into a {@link JsonNode}. + */ + public JsonNode parse(byte[] content) { + return invokeSafely(() -> { + try (JsonParser parser = jsonFactory.createParser(content) + .configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false)) { + return parse(parser); + } + }); + } + + /** + * Parse the provided {@link String} into a {@link JsonNode}. 
+ */ + public JsonNode parse(String content) { + return invokeSafely(() -> { + try (JsonParser parser = jsonFactory.createParser(content) + .configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false)) { + return parse(parser); + } + }); + } + + private JsonNode parse(JsonParser parser) throws IOException { + try { + return parseToken(parser, parser.nextToken()); + } catch (Exception e) { + removeErrorLocationsIfRequired(e); + throw e; + } + } + + private void removeErrorLocationsIfRequired(Throwable exception) { + if (removeErrorLocations) { + removeErrorLocations(exception); + } + } + + private void removeErrorLocations(Throwable exception) { + if (exception == null) { + return; + } + + if (exception instanceof JsonParseException) { + ((JsonParseException) exception).clearLocation(); + } + + removeErrorLocations(exception.getCause()); + } + + private JsonNode parseToken(JsonParser parser, JsonToken token) throws IOException { + if (token == null) { + return null; + } + switch (token) { + case VALUE_STRING: + return new StringJsonNode(parser.getText()); + case VALUE_FALSE: + return new BooleanJsonNode(false); + case VALUE_TRUE: + return new BooleanJsonNode(true); + case VALUE_NULL: + return NullJsonNode.instance(); + case VALUE_NUMBER_FLOAT: + case VALUE_NUMBER_INT: + return new NumberJsonNode(parser.getText()); + case START_OBJECT: + return parseObject(parser); + case START_ARRAY: + return parseArray(parser); + case VALUE_EMBEDDED_OBJECT: + return new EmbeddedObjectJsonNode(parser.getEmbeddedObject()); + default: + throw new IllegalArgumentException("Unexpected JSON token - " + token); + } + } + + private JsonNode parseObject(JsonParser parser) throws IOException { + JsonToken currentToken = parser.nextToken(); + Map object = new LinkedHashMap<>(); + while (currentToken != JsonToken.END_OBJECT) { + String fieldName = parser.getText(); + object.put(fieldName, parseToken(parser, parser.nextToken())); + currentToken = parser.nextToken(); + } + return new 
ObjectJsonNode(object); + } + + private JsonNode parseArray(JsonParser parser) throws IOException { + JsonToken currentToken = parser.nextToken(); + List array = new ArrayList<>(); + while (currentToken != JsonToken.END_ARRAY) { + array.add(parseToken(parser, currentToken)); + currentToken = parser.nextToken(); + } + return new ArrayJsonNode(array); + } + + /** + * A builder for configuring and creating {@link JsonNodeParser}. Created via {@link #builder()}. + */ + public static final class Builder { + private JsonFactory jsonFactory = DEFAULT_JSON_FACTORY; + private boolean removeErrorLocations = false; + + private Builder() { + } + + /** + * Whether error locations should be removed if parsing fails. This prevents the content of the JSON from appearing in + * error messages. This is useful when the content of the JSON may be sensitive and not want to be logged. + * + *

By default, this is false. + */ + public Builder removeErrorLocations(boolean removeErrorLocations) { + this.removeErrorLocations = removeErrorLocations; + return this; + } + + /** + * The {@link JsonFactory} implementation to be used when parsing the input. This allows JSON extensions like CBOR or + * Ion to be supported. + * + *

It's highly recommended us use a shared {@code JsonFactory} where possible, so they should be stored statically: + * http://wiki.fasterxml.com/JacksonBestPracticesPerformance + * + *

By default, this is {@link #DEFAULT_JSON_FACTORY}. + */ + public Builder jsonFactory(JsonFactory jsonFactory) { + this.jsonFactory = jsonFactory; + return this; + } + + /** + * Build a {@link JsonNodeParser} based on the current configuration of this builder. + */ + public JsonNodeParser build() { + return new JsonNodeParser(this); + } + } +} diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNodeVisitor.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNodeVisitor.java new file mode 100644 index 000000000000..0b6efd8bb1f1 --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/JsonNodeVisitor.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.jsoncore; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkProtectedApi; + +/** + * Converter from a {@link JsonNode} to a new type. This is usually invoked via {@link JsonNode#visit(JsonNodeVisitor)}. + */ +@SdkProtectedApi +public interface JsonNodeVisitor { + /** + * Invoked if {@link JsonNode#visit(JsonNodeVisitor)} is invoked on a null JSON node. + */ + T visitNull(); + + /** + * Invoked if {@link JsonNode#visit(JsonNodeVisitor)} is invoked on a boolean JSON node. 
+ */ + T visitBoolean(boolean bool); + + /** + * Invoked if {@link JsonNode#visit(JsonNodeVisitor)} is invoked on a number JSON node. + */ + T visitNumber(String number); + + /** + * Invoked if {@link JsonNode#visit(JsonNodeVisitor)} is invoked on a string JSON node. + */ + T visitString(String string); + + /** + * Invoked if {@link JsonNode#visit(JsonNodeVisitor)} is invoked on an array JSON node. + */ + T visitArray(List array); + + /** + * Invoked if {@link JsonNode#visit(JsonNodeVisitor)} is invoked on an object JSON node. + */ + T visitObject(Map object); + + /** + * Invoked if {@link JsonNode#visit(JsonNodeVisitor)} is invoked on an embedded object JSON node. + */ + T visitEmbeddedObject(Object embeddedObject); +} diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/ArrayJsonNode.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/ArrayJsonNode.java new file mode 100644 index 000000000000..ccf43ad940ef --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/ArrayJsonNode.java @@ -0,0 +1,112 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.protocols.jsoncore.internal; + +import java.util.List; +import java.util.Map; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeVisitor; + +/** + * An array {@link JsonNode}. + */ +@SdkInternalApi +public final class ArrayJsonNode implements JsonNode { + private final List value; + + public ArrayJsonNode(List value) { + this.value = value; + } + + @Override + public boolean isArray() { + return true; + } + + @Override + public String asNumber() { + throw new UnsupportedOperationException("A JSON array cannot be converted to a number."); + } + + @Override + public String asString() { + throw new UnsupportedOperationException("A JSON array cannot be converted to a string."); + } + + @Override + public boolean asBoolean() { + throw new UnsupportedOperationException("A JSON array cannot be converted to a boolean."); + } + + @Override + public List asArray() { + return value; + } + + @Override + public Map asObject() { + throw new UnsupportedOperationException("A JSON array cannot be converted to an object."); + } + + @Override + public Object asEmbeddedObject() { + throw new UnsupportedOperationException("A JSON array cannot be converted to an embedded object."); + } + + @Override + public T visit(JsonNodeVisitor visitor) { + return visitor.visitArray(asArray()); + } + + @Override + public String text() { + return null; + } + + @Override + public Optional index(int child) { + if (child < 0 || child >= value.size()) { + return Optional.empty(); + } + return Optional.of(value.get(child)); + } + + @Override + public String toString() { + return value.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ArrayJsonNode that = (ArrayJsonNode) o; + + return 
value.equals(that.value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } +} diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/BooleanJsonNode.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/BooleanJsonNode.java new file mode 100644 index 000000000000..a77f469ab937 --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/BooleanJsonNode.java @@ -0,0 +1,103 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.jsoncore.internal; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeVisitor; + +/** + * A boolean {@link JsonNode}. 
+ */ +@SdkInternalApi +public final class BooleanJsonNode implements JsonNode { + private final boolean value; + + public BooleanJsonNode(boolean value) { + this.value = value; + } + + @Override + public boolean isBoolean() { + return true; + } + + @Override + public String asNumber() { + throw new UnsupportedOperationException("A JSON boolean cannot be converted to a number."); + } + + @Override + public String asString() { + throw new UnsupportedOperationException("A JSON boolean cannot be converted to a string."); + } + + @Override + public boolean asBoolean() { + return value; + } + + @Override + public List asArray() { + throw new UnsupportedOperationException("A JSON boolean cannot be converted to an array."); + } + + @Override + public Map asObject() { + throw new UnsupportedOperationException("A JSON boolean cannot be converted to an object."); + } + + @Override + public Object asEmbeddedObject() { + throw new UnsupportedOperationException("A JSON boolean cannot be converted to an embedded object."); + } + + @Override + public T visit(JsonNodeVisitor visitor) { + return visitor.visitBoolean(asBoolean()); + } + + @Override + public String text() { + return Boolean.toString(value); + } + + @Override + public String toString() { + return Boolean.toString(value); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BooleanJsonNode that = (BooleanJsonNode) o; + + return value == that.value; + } + + @Override + public int hashCode() { + return (value ? 
1 : 0); + } +} diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/EmbeddedObjectJsonNode.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/EmbeddedObjectJsonNode.java new file mode 100644 index 000000000000..197b68e688d4 --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/EmbeddedObjectJsonNode.java @@ -0,0 +1,103 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.jsoncore.internal; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeVisitor; + +/** + * An embedded object {@link JsonNode}. 
+ */ +@SdkInternalApi +public final class EmbeddedObjectJsonNode implements JsonNode { + private final Object embeddedObject; + + public EmbeddedObjectJsonNode(Object embeddedObject) { + this.embeddedObject = embeddedObject; + } + + @Override + public boolean isEmbeddedObject() { + return true; + } + + @Override + public String asNumber() { + throw new UnsupportedOperationException("A JSON embedded object cannot be converted to a number."); + } + + @Override + public String asString() { + throw new UnsupportedOperationException("A JSON embedded object cannot be converted to a string."); + } + + @Override + public boolean asBoolean() { + throw new UnsupportedOperationException("A JSON embedded object cannot be converted to a boolean."); + } + + @Override + public List asArray() { + throw new UnsupportedOperationException("A JSON embedded object cannot be converted to an array."); + } + + @Override + public Map asObject() { + throw new UnsupportedOperationException("A JSON embedded object cannot be converted to an object."); + } + + @Override + public Object asEmbeddedObject() { + return embeddedObject; + } + + @Override + public T visit(JsonNodeVisitor visitor) { + return visitor.visitEmbeddedObject(asEmbeddedObject()); + } + + @Override + public String text() { + return null; + } + + @Override + public String toString() { + return "<>"; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EmbeddedObjectJsonNode that = (EmbeddedObjectJsonNode) o; + + return embeddedObject.equals(that.embeddedObject); + } + + @Override + public int hashCode() { + return embeddedObject.hashCode(); + } +} diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/NullJsonNode.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/NullJsonNode.java new file mode 100644 index 000000000000..36b68ae8ad61 --- 
/dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/NullJsonNode.java @@ -0,0 +1,97 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.jsoncore.internal; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeVisitor; + +/** + * A null {@link JsonNode}. 
+ */ +@SdkInternalApi +public final class NullJsonNode implements JsonNode { + private static final NullJsonNode INSTANCE = new NullJsonNode(); + + private NullJsonNode() { + } + + public static NullJsonNode instance() { + return INSTANCE; + } + + @Override + public boolean isNull() { + return true; + } + + @Override + public String asNumber() { + throw new UnsupportedOperationException("A JSON null cannot be converted to a number."); + } + + @Override + public String asString() { + throw new UnsupportedOperationException("A JSON null cannot be converted to a string."); + } + + @Override + public boolean asBoolean() { + throw new UnsupportedOperationException("A JSON null cannot be converted to a boolean."); + } + + @Override + public List asArray() { + throw new UnsupportedOperationException("A JSON null cannot be converted to an array."); + } + + @Override + public Map asObject() { + throw new UnsupportedOperationException("A JSON null cannot be converted to an object."); + } + + @Override + public Object asEmbeddedObject() { + throw new UnsupportedOperationException("A JSON null cannot be converted to an embedded object."); + } + + @Override + public T visit(JsonNodeVisitor visitor) { + return visitor.visitNull(); + } + + @Override + public String text() { + return null; + } + + @Override + public String toString() { + return "null"; + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public int hashCode() { + return 0; + } +} diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/NumberJsonNode.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/NumberJsonNode.java new file mode 100644 index 000000000000..b2042095b333 --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/NumberJsonNode.java @@ -0,0 +1,103 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.jsoncore.internal; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeVisitor; + +/** + * A numeric {@link JsonNode}. + */ +@SdkInternalApi +public final class NumberJsonNode implements JsonNode { + private final String value; + + public NumberJsonNode(String value) { + this.value = value; + } + + @Override + public boolean isNumber() { + return true; + } + + @Override + public String asNumber() { + return value; + } + + @Override + public String asString() { + throw new UnsupportedOperationException("A JSON number cannot be converted to a string."); + } + + @Override + public boolean asBoolean() { + throw new UnsupportedOperationException("A JSON number cannot be converted to a boolean."); + } + + @Override + public List asArray() { + throw new UnsupportedOperationException("A JSON number cannot be converted to an array."); + } + + @Override + public Map asObject() { + throw new UnsupportedOperationException("A JSON number cannot be converted to an object."); + } + + @Override + public Object asEmbeddedObject() { + throw new UnsupportedOperationException("A JSON number cannot be converted to an embedded object."); + } + + @Override + public T visit(JsonNodeVisitor visitor) { + return visitor.visitNumber(asNumber()); + } + + @Override + public 
String text() { + return value; + } + + @Override + public String toString() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NumberJsonNode that = (NumberJsonNode) o; + + return value.equals(that.value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } +} diff --git a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/ObjectJsonNode.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/ObjectJsonNode.java new file mode 100644 index 000000000000..2a388421f4e2 --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/ObjectJsonNode.java @@ -0,0 +1,118 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.jsoncore.internal; + +import java.util.List; +import java.util.Map; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeVisitor; + +/** + * An object {@link JsonNode}. 
+ */ +@SdkInternalApi +public final class ObjectJsonNode implements JsonNode { + private final Map value; + + public ObjectJsonNode(Map value) { + this.value = value; + } + + @Override + public boolean isObject() { + return true; + } + + @Override + public String asNumber() { + throw new UnsupportedOperationException("A JSON object cannot be converted to a number."); + } + + @Override + public String asString() { + throw new UnsupportedOperationException("A JSON object cannot be converted to a string."); + } + + @Override + public boolean asBoolean() { + throw new UnsupportedOperationException("A JSON object cannot be converted to a boolean."); + } + + @Override + public List asArray() { + throw new UnsupportedOperationException("A JSON object cannot be converted to an array."); + } + + @Override + public Map asObject() { + return value; + } + + @Override + public T visit(JsonNodeVisitor visitor) { + return visitor.visitObject(asObject()); + } + + @Override + public Object asEmbeddedObject() { + throw new UnsupportedOperationException("A JSON object cannot be converted to an embedded object."); + } + + @Override + public String text() { + return null; + } + + @Override + public Optional field(String child) { + return Optional.ofNullable(value.get(child)); + } + + @Override + public String toString() { + if (value.isEmpty()) { + return "{}"; + } + + StringBuilder output = new StringBuilder(); + output.append("{"); + value.forEach((k, v) -> output.append("\"").append(k).append("\": ") + .append(v.toString()).append(",")); + output.setCharAt(output.length() - 1, '}'); + return output.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ObjectJsonNode that = (ObjectJsonNode) o; + + return value.equals(that.value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } +} diff --git 
a/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/StringJsonNode.java b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/StringJsonNode.java new file mode 100644 index 000000000000..2d74673c6c77 --- /dev/null +++ b/core/json-utils/src/main/java/software/amazon/awssdk/protocols/jsoncore/internal/StringJsonNode.java @@ -0,0 +1,109 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.jsoncore.internal; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeVisitor; +import software.amazon.awssdk.utils.Validate; + +/** + * A string {@link JsonNode}. 
+ */ +@SdkInternalApi +public final class StringJsonNode implements JsonNode { + private final String value; + + public StringJsonNode(String value) { + Validate.paramNotNull(value, "value"); + this.value = value; + } + + @Override + public boolean isString() { + return true; + } + + @Override + public String asNumber() { + throw new UnsupportedOperationException("A JSON string cannot be converted to a number."); + } + + @Override + public String asString() { + return value; + } + + @Override + public boolean asBoolean() { + throw new UnsupportedOperationException("A JSON string cannot be converted to a boolean."); + } + + @Override + public List asArray() { + throw new UnsupportedOperationException("A JSON string cannot be converted to an array."); + } + + @Override + public Map asObject() { + throw new UnsupportedOperationException("A JSON string cannot be converted to an object."); + } + + @Override + public Object asEmbeddedObject() { + throw new UnsupportedOperationException("A JSON string cannot be converted to an embedded object."); + } + + @Override + public T visit(JsonNodeVisitor visitor) { + return visitor.visitString(asString()); + } + + @Override + public String text() { + return value; + } + + @Override + public String toString() { + // Does not handle unicode control characters + return "\"" + + value.replace("\\", "\\\\") + .replace("\"", "\\\"") + + "\""; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + StringJsonNode that = (StringJsonNode) o; + + return value.equals(that.value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } +} \ No newline at end of file diff --git a/core/json-utils/src/test/java/software/amazon/awssdk/protocols/jsoncore/JsonNodeTest.java b/core/json-utils/src/test/java/software/amazon/awssdk/protocols/jsoncore/JsonNodeTest.java new file mode 100644 index 000000000000..756c891a9225 --- 
package software.amazon.awssdk.protocols.jsoncore;

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

import org.junit.Test;
import software.amazon.awssdk.utils.StringInputStream;

/**
 * Functional tests for {@link JsonNode} / {@link JsonNodeParser}: input-source overloads,
 * type predicates, typed conversions (and their failure modes), field/index lookups,
 * toString round-tripping, and error-location behavior.
 */
public class JsonNodeTest {
    // Shared parser; JsonNode.parser() is expected to be reusable across tests.
    private static final JsonNodeParser PARSER = JsonNode.parser();

    // --- input-source overloads ---

    @Test
    public void parseString_works() {
        assertThat(PARSER.parse("{}").isObject()).isTrue();
    }

    @Test
    public void parseInputStream_works() {
        assertThat(PARSER.parse(new StringInputStream("{}")).isObject()).isTrue();
    }

    @Test
    public void parseByteArray_works() {
        assertThat(PARSER.parse("{}".getBytes(UTF_8)).isObject()).isTrue();
    }

    // --- type predicates: exactly one is*() is true per node; all other as*() throw ---

    @Test
    public void parseNull_givesCorrectType() {
        JsonNode node = PARSER.parse("null");

        assertThat(node.isNull()).isTrue();
        assertThat(node.isBoolean()).isFalse();
        assertThat(node.isNumber()).isFalse();
        assertThat(node.isString()).isFalse();
        assertThat(node.isArray()).isFalse();
        assertThat(node.isObject()).isFalse();
        assertThat(node.isEmbeddedObject()).isFalse();

        assertThatThrownBy(node::asBoolean).isInstanceOf(UnsupportedOperationException.class);
        assertThatThrownBy(node::asNumber).isInstanceOf(UnsupportedOperationException.class);
        assertThatThrownBy(node::asString).isInstanceOf(UnsupportedOperationException.class);
        assertThatThrownBy(node::asArray).isInstanceOf(UnsupportedOperationException.class);
        assertThatThrownBy(node::asObject).isInstanceOf(UnsupportedOperationException.class);
        assertThatThrownBy(node::asEmbeddedObject).isInstanceOf(UnsupportedOperationException.class);
    }

    @Test
    public void parseBoolean_givesCorrectType() {
        String[] options = { "true", "false" };
        for (String option : options) {
            JsonNode node = PARSER.parse(option);


            assertThat(node.isNull()).isFalse();
            assertThat(node.isBoolean()).isTrue();
            assertThat(node.isNumber()).isFalse();
            assertThat(node.isString()).isFalse();
            assertThat(node.isArray()).isFalse();
            assertThat(node.isObject()).isFalse();
            assertThat(node.isEmbeddedObject()).isFalse();

            assertThatThrownBy(node::asNumber).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asString).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asArray).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asObject).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asEmbeddedObject).isInstanceOf(UnsupportedOperationException.class);
        }
    }

    @Test
    public void parseNumber_givesCorrectType() {
        // Includes exponents beyond double range to ensure numbers are kept as text.
        String[] options = { "-1e100", "-1", "0", "1", "1e100" };
        for (String option : options) {
            JsonNode node = PARSER.parse(option);

            assertThat(node.isNull()).isFalse();
            assertThat(node.isBoolean()).isFalse();
            assertThat(node.isNumber()).isTrue();
            assertThat(node.isString()).isFalse();
            assertThat(node.isArray()).isFalse();
            assertThat(node.isObject()).isFalse();
            assertThat(node.isEmbeddedObject()).isFalse();

            assertThatThrownBy(node::asBoolean).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asString).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asArray).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asObject).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asEmbeddedObject).isInstanceOf(UnsupportedOperationException.class);
        }
    }

    @Test
    public void parseString_givesCorrectType() {
        String[] options = { "\"\"", "\"foo\"" };
        for (String option : options) {
            JsonNode node = PARSER.parse(option);

            assertThat(node.isNull()).isFalse();
            assertThat(node.isBoolean()).isFalse();
            assertThat(node.isNumber()).isFalse();
            assertThat(node.isString()).isTrue();
            assertThat(node.isArray()).isFalse();
            assertThat(node.isObject()).isFalse();
            assertThat(node.isEmbeddedObject()).isFalse();

            assertThatThrownBy(node::asBoolean).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asNumber).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asArray).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asObject).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asEmbeddedObject).isInstanceOf(UnsupportedOperationException.class);
        }
    }

    @Test
    public void parseArray_givesCorrectType() {
        String[] options = { "[]", "[null]" };
        for (String option : options) {
            JsonNode node = PARSER.parse(option);

            assertThat(node.isNull()).isFalse();
            assertThat(node.isBoolean()).isFalse();
            assertThat(node.isNumber()).isFalse();
            assertThat(node.isString()).isFalse();
            assertThat(node.isArray()).isTrue();
            assertThat(node.isObject()).isFalse();
            assertThat(node.isEmbeddedObject()).isFalse();

            assertThatThrownBy(node::asBoolean).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asNumber).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asString).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asObject).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asEmbeddedObject).isInstanceOf(UnsupportedOperationException.class);
        }
    }

    @Test
    public void parseObject_givesCorrectType() {
        String[] options = { "{}", "{ \"foo\": null }" };
        for (String option : options) {
            JsonNode node = PARSER.parse(option);

            assertThat(node.isNull()).isFalse();
            assertThat(node.isBoolean()).isFalse();
            assertThat(node.isNumber()).isFalse();
            assertThat(node.isString()).isFalse();
            assertThat(node.isArray()).isFalse();
            assertThat(node.isObject()).isTrue();
            assertThat(node.isEmbeddedObject()).isFalse();

            assertThatThrownBy(node::asBoolean).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asNumber).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asString).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asArray).isInstanceOf(UnsupportedOperationException.class);
            assertThatThrownBy(node::asEmbeddedObject).isInstanceOf(UnsupportedOperationException.class);
        }
    }

    // --- typed conversions return the parsed value ---

    @Test
    public void parseBoolean_givesCorrectValue() {
        assertThat(PARSER.parse("true").asBoolean()).isTrue();
        assertThat(PARSER.parse("false").asBoolean()).isFalse();
    }

    @Test
    public void parseNumber_givesCorrectValue() {
        // Numbers round-trip as their exact source text, including huge exponents.
        assertThat(PARSER.parse("0").asNumber()).isEqualTo("0");
        assertThat(PARSER.parse("-1").asNumber()).isEqualTo("-1");
        assertThat(PARSER.parse("1").asNumber()).isEqualTo("1");
        assertThat(PARSER.parse("1e10000").asNumber()).isEqualTo("1e10000");
        assertThat(PARSER.parse("-1e10000").asNumber()).isEqualTo("-1e10000");
        assertThat(PARSER.parse("1.23").asNumber()).isEqualTo("1.23");
        assertThat(PARSER.parse("-1.23").asNumber()).isEqualTo("-1.23");
    }

    @Test
    public void parseString_givesCorrectValue() {
        // Covers empty strings, whitespace preservation, and escape handling.
        assertThat(PARSER.parse("\"foo\"").asString()).isEqualTo("foo");
        assertThat(PARSER.parse("\"\"").asString()).isEqualTo("");
        assertThat(PARSER.parse("\" \"").asString()).isEqualTo(" ");
        assertThat(PARSER.parse("\"%20\"").asString()).isEqualTo("%20");
        assertThat(PARSER.parse("\"\\\"\"").asString()).isEqualTo("\"");
        assertThat(PARSER.parse("\" \"").asString()).isEqualTo(" ");
    }

    @Test
    public void parseArray_givesCorrectValue() {
        assertThat(PARSER.parse("[]").asArray()).isEmpty();
        assertThat(PARSER.parse("[null, 1]").asArray()).satisfies(list -> {
            assertThat(list).hasSize(2);
            assertThat(list.get(0).isNull()).isTrue();
            assertThat(list.get(1).asNumber()).isEqualTo("1");
        });
    }

    @Test
    public void parseObject_givesCorrectValue() {
        assertThat(PARSER.parse("{}").asObject()).isEmpty();
        assertThat(PARSER.parse("{\"foo\": \"bar\", \"baz\": 0}").asObject()).satisfies(map -> {
            assertThat(map).hasSize(2);
            assertThat(map.get("foo").asString()).isEqualTo("bar");
            assertThat(map.get("baz").asNumber()).isEqualTo("0");
        });
    }

    // --- text(): non-null only for scalar value nodes ---

    @Test
    public void text_returnsContent() {
        assertThat(PARSER.parse("null").text()).isEqualTo(null);
        assertThat(PARSER.parse("0").text()).isEqualTo("0");
        assertThat(PARSER.parse("\"foo\"").text()).isEqualTo("foo");
        assertThat(PARSER.parse("true").text()).isEqualTo("true");
        assertThat(PARSER.parse("[]").text()).isEqualTo(null);
        assertThat(PARSER.parse("{}").text()).isEqualTo(null);
    }

    // --- field(): empty Optional for non-objects or missing members ---

    @Test
    public void getString_returnsContent() {
        assertThat(PARSER.parse("null").field("")).isEmpty();
        assertThat(PARSER.parse("0").field("")).isEmpty();
        assertThat(PARSER.parse("\"foo\"").field("")).isEmpty();
        assertThat(PARSER.parse("true").field("")).isEmpty();
        assertThat(PARSER.parse("[]").field("")).isEmpty();
        assertThat(PARSER.parse("{\"\":0}").field("")).map(JsonNode::asNumber).hasValue("0");
    }

    // --- index(): empty Optional for non-arrays or out-of-range indices ---

    @Test
    public void getArray_returnsContent() {
        assertThat(PARSER.parse("null").index(0)).isEmpty();
        assertThat(PARSER.parse("0").index(0)).isEmpty();
        assertThat(PARSER.parse("\"foo\"").index(0)).isEmpty();
        assertThat(PARSER.parse("true").index(0)).isEmpty();
        assertThat(PARSER.parse("[]").index(0)).isEmpty();
        assertThat(PARSER.parse("[null]").index(0)).map(JsonNode::isNull).hasValue(true);
        assertThat(PARSER.parse("{}").field("")).isEmpty();
    }

    // --- toString(): parse/serialize round-trip preserves the document byte-for-byte ---

    @Test
    public void toStringIsCorrect() {
        String input = "{" +
                       "\"1\": \"2\"," +
                       "\"3\": 4," +
                       "\"5\": null," +
                       "\"6\": false," +
                       "\"7\": [[{}]]," +
                       "\"8\": \"\\\\n\\\"\"" +
                       "}";
        assertThat(PARSER.parse(input).toString()).isEqualTo(input);
    }

    // --- error messages: include offending content by default, scrubbed when requested ---

    @Test
    public void exceptionsIncludeErrorLocation() {
        assertThatThrownBy(() -> PARSER.parse("{{foo}")).hasMessageContaining("foo");
    }

    @Test
    public void removeErrorLocations_removesErrorLocations() {
        assertThatThrownBy(() -> JsonNode.parserBuilder()
                                         .removeErrorLocations(true)
                                         .build()
                                         .parse("{{foo}"))
            .satisfies(exception -> {
                // Walk the full cause chain: no level may leak document content.
                Throwable cause = exception;
                while (cause != null) {
                    assertThat(cause.getMessage()).doesNotContain("foo");
                    cause = cause.getCause();
                }
            });
    }
}
"ec2_metadata_service_endpoint"; + private ProfileProperty() { } } diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index 09fd84f3c26c..d6244012655a 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 @@ -47,12 +47,14 @@ ${awsjavasdk.version} - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor + software.amazon.awssdk + third-party-jackson-dataformat-cbor + ${awsjavasdk.version} - com.fasterxml.jackson.core - jackson-core + software.amazon.awssdk + third-party-jackson-core + ${awsjavasdk.version} junit diff --git a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/AwsStructuredCborFactory.java b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/AwsStructuredCborFactory.java index d9f9144d6bf2..64092399fef4 100644 --- a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/AwsStructuredCborFactory.java +++ b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/AwsStructuredCborFactory.java @@ -15,10 +15,10 @@ package software.amazon.awssdk.protocols.cbor.internal; -import com.fasterxml.jackson.core.JsonFactory; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.protocols.json.BaseAwsStructuredJsonFactory; import software.amazon.awssdk.protocols.json.StructuredJsonGenerator; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; /** * Creates generators and protocol handlers for CBOR wire format. 
diff --git a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkCborGenerator.java b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkCborGenerator.java index 0520530a5103..67f2bd7b21b0 100644 --- a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkCborGenerator.java +++ b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkCborGenerator.java @@ -15,13 +15,13 @@ package software.amazon.awssdk.protocols.cbor.internal; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.dataformat.cbor.CBORGenerator; import java.io.IOException; import java.time.Instant; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.protocols.json.SdkJsonGenerator; import software.amazon.awssdk.protocols.json.StructuredJsonGenerator; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; +import software.amazon.awssdk.thirdparty.jackson.dataformat.cbor.CBORGenerator; /** * Thin wrapper around Jackson's JSON generator for CBOR. 
diff --git a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkStructuredCborFactory.java b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkStructuredCborFactory.java index 3e014774db63..62d4d93ab8ce 100644 --- a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkStructuredCborFactory.java +++ b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkStructuredCborFactory.java @@ -15,11 +15,11 @@ package software.amazon.awssdk.protocols.cbor.internal; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.dataformat.cbor.CBORFactory; import java.util.function.BiFunction; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.protocols.json.StructuredJsonGenerator; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; +import software.amazon.awssdk.thirdparty.jackson.dataformat.cbor.CBORFactory; /** * Creates generators and protocol handlers for CBOR wire format. diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/AwsIonProtocolFactory.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/AwsIonProtocolFactory.java deleted file mode 100644 index 5e976913b51d..000000000000 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/AwsIonProtocolFactory.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.ion; - -import software.amazon.awssdk.annotations.SdkProtectedApi; -import software.amazon.awssdk.core.SdkSystemSetting; -import software.amazon.awssdk.protocols.ion.internal.AwsStructuredIonFactory; -import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; -import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory; -import software.amazon.awssdk.protocols.json.DefaultJsonContentTypeResolver; -import software.amazon.awssdk.protocols.json.JsonContentTypeResolver; -import software.amazon.awssdk.protocols.json.StructuredJsonFactory; - -/** - * Protocol factory for AWS/Ion protocols. Supports both JSON RPC and REST JSON versions of Ion. Defaults - * to Ion Binary but will use Ion Text if the system setting {@link SdkSystemSetting#BINARY_ION_ENABLED} is - * set to false. - */ -@SdkProtectedApi -public final class AwsIonProtocolFactory extends BaseAwsJsonProtocolFactory { - - /** - * Content type resolver implementation for Ion-enabled services. - */ - private static final JsonContentTypeResolver ION_BINARY = new DefaultJsonContentTypeResolver("application/x-amz-ion-"); - - /** - * Content type resolver implementation for debugging Ion-enabled services. - */ - private static final JsonContentTypeResolver ION_TEXT = new DefaultJsonContentTypeResolver("text/x-amz-ion-"); - - private AwsIonProtocolFactory(Builder builder) { - super(builder); - } - - public static Builder builder() { - return new Builder(); - } - - /** - * @return Content type resolver implementation to use. - */ - @Override - protected JsonContentTypeResolver getContentTypeResolver() { - return isIonBinaryEnabled() ? 
ION_BINARY : ION_TEXT; - } - - /** - * @return Instance of {@link StructuredJsonFactory} to use in creating handlers. - */ - @Override - protected StructuredJsonFactory getSdkFactory() { - return isIonBinaryEnabled() - ? AwsStructuredIonFactory.SDK_ION_BINARY_FACTORY - : AwsStructuredIonFactory.SDK_ION_TEXT_FACTORY; - } - - private boolean isIonBinaryEnabled() { - return SdkSystemSetting.BINARY_ION_ENABLED.getBooleanValueOrThrow(); - } - - /** - * Builder for {@link AwsJsonProtocolFactory}. - */ - public static final class Builder extends BaseAwsJsonProtocolFactory.Builder { - - private Builder() { - } - - public AwsIonProtocolFactory build() { - return new AwsIonProtocolFactory(this); - } - - } -} diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/AwsStructuredIonFactory.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/AwsStructuredIonFactory.java deleted file mode 100644 index 85345e33245e..000000000000 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/AwsStructuredIonFactory.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.ion.internal; - -import com.fasterxml.jackson.core.JsonFactory; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.protocols.json.BaseAwsStructuredJsonFactory; -import software.amazon.awssdk.protocols.json.ErrorCodeParser; -import software.amazon.awssdk.protocols.json.StructuredJsonGenerator; -import software.amazon.ion.system.IonBinaryWriterBuilder; -import software.amazon.ion.system.IonTextWriterBuilder; -import software.amazon.ion.system.IonWriterBuilder; - -@SdkInternalApi -public final class AwsStructuredIonFactory extends SdkStructuredIonFactory { - private static final IonWriterBuilder BINARY_WRITER_BUILDER = IonBinaryWriterBuilder.standard().immutable(); - private static final IonWriterBuilder TEXT_WRITER_BUILDER = IonTextWriterBuilder.standard().immutable(); - - - public static final BaseAwsStructuredJsonFactory SDK_ION_BINARY_FACTORY = - new AwsIonFactory(JSON_FACTORY, BINARY_WRITER_BUILDER); - - public static final BaseAwsStructuredJsonFactory SDK_ION_TEXT_FACTORY = new AwsIonFactory(JSON_FACTORY, TEXT_WRITER_BUILDER); - - static class AwsIonFactory extends BaseAwsStructuredJsonFactory { - private final JsonFactory jsonFactory; - - private final IonWriterBuilder builder; - - - AwsIonFactory(JsonFactory jsonFactory, IonWriterBuilder builder) { - super(jsonFactory); - this.jsonFactory = jsonFactory; - this.builder = builder; - } - - @Override - protected StructuredJsonGenerator createWriter(JsonFactory jsonFactory, String contentType) { - return ION_GENERATOR_SUPPLIER.apply(builder, contentType); - } - - @Override - public JsonFactory getJsonFactory() { - return jsonFactory; - } - - @Override - public ErrorCodeParser getErrorCodeParser(String customErrorCodeFieldName) { - return new CompositeErrorCodeParser( - new IonErrorCodeParser(ION_SYSTEM), - super.getErrorCodeParser(customErrorCodeFieldName)); - } - } -} diff --git 
a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/CompositeErrorCodeParser.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/CompositeErrorCodeParser.java deleted file mode 100644 index 0d3bdb20aeb0..000000000000 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/CompositeErrorCodeParser.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.ion.internal; - -import java.util.Arrays; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.json.ErrorCodeParser; -import software.amazon.awssdk.protocols.json.JsonContent; - -@SdkInternalApi -class CompositeErrorCodeParser implements ErrorCodeParser { - private final Iterable parsers; - - CompositeErrorCodeParser(ErrorCodeParser... 
parsers) { - this.parsers = Arrays.asList(parsers); - } - - @Override - public String parseErrorCode(SdkHttpFullResponse response, JsonContent jsonContent) { - for (ErrorCodeParser parser : parsers) { - String errorCode = parser.parseErrorCode(response, jsonContent); - if (errorCode != null) { - return errorCode; - } - } - - return null; - } -} diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonErrorCodeParser.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonErrorCodeParser.java deleted file mode 100644 index 64d91e3b080b..000000000000 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonErrorCodeParser.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.ion.internal; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.json.ErrorCodeParser; -import software.amazon.awssdk.protocols.json.JsonContent; -import software.amazon.awssdk.utils.IoUtils; -import software.amazon.ion.IonReader; -import software.amazon.ion.IonSystem; -import software.amazon.ion.IonType; - -//TODO Clean up ION parser -@SdkInternalApi -class IonErrorCodeParser implements ErrorCodeParser { - private static final Logger log = LoggerFactory.getLogger(IonErrorCodeParser.class); - - private static final String TYPE_PREFIX = "aws-type:"; - private static final String X_AMZN_REQUEST_ID_HEADER = "x-amzn-RequestId"; - - private final IonSystem ionSystem; - - IonErrorCodeParser(IonSystem ionSystem) { - this.ionSystem = ionSystem; - } - - private static String getRequestId(SdkHttpFullResponse response) { - return response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null); - } - - @Override - public String parseErrorCode(SdkHttpFullResponse response, JsonContent jsonContents) { - IonReader reader = ionSystem.newReader(jsonContents.getRawContent()); - try { - IonType type = reader.next(); - if (type != IonType.STRUCT) { - throw SdkClientException.builder() - .message(String.format("Can only get error codes from structs (saw %s), request id %s", - type, getRequestId(response))) - .build(); - } - - boolean errorCodeSeen = false; - String errorCode = null; - String[] annotations = reader.getTypeAnnotations(); - for (String annotation : annotations) { - if (annotation.startsWith(TYPE_PREFIX)) { - if (errorCodeSeen) { - throw SdkClientException.builder() - .message(String.format("Multiple error code annotations found for request id %s", - getRequestId(response))) - .build(); - } 
else { - errorCodeSeen = true; - errorCode = annotation.substring(TYPE_PREFIX.length()); - } - } - } - - return errorCode; - } finally { - IoUtils.closeQuietly(reader, log); - } - } -} diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonFactory.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonFactory.java deleted file mode 100644 index 7425516f5ee6..000000000000 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonFactory.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.ion.internal; - -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParser; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.Reader; -import java.net.URL; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.ion.IonSystem; - -@SdkInternalApi -public final class IonFactory extends JsonFactory { - private static final long serialVersionUID = 1; - - private static final boolean SHOULD_CLOSE_READER_YES = true; - private static final boolean SHOULD_CLOSE_READER_NO = false; - - // TODO IonReaderBuilder will soon be available for constructing IonReaders - // without an IonSystem. 
This should use an IonReaderBuilder. - private final transient IonSystem ionSystem; - - public IonFactory(IonSystem ionSystem) { - this.ionSystem = ionSystem; - } - - @Override - public JsonParser createParser(InputStream in) { - return new IonParser(ionSystem.newReader(in), SHOULD_CLOSE_READER_NO); - } - - @Override - public JsonParser createParser(byte[] data) { - return new IonParser(ionSystem.newReader(data), SHOULD_CLOSE_READER_NO); - } - - @Override - public JsonParser createParser(byte[] data, int offset, int length) { - return new IonParser(ionSystem.newReader(data, offset, length), SHOULD_CLOSE_READER_NO); - } - - @Override - public JsonParser createParser(char[] data) { - throw new UnsupportedOperationException(); - } - - @Override - public JsonParser createParser(char[] data, int offset, int length) { - throw new UnsupportedOperationException(); - } - - @Override - public JsonParser createParser(String data) { - return new IonParser(ionSystem.newReader(data), SHOULD_CLOSE_READER_NO); - } - - @Override - public JsonParser createParser(Reader data) { - return new IonParser(ionSystem.newReader(data), SHOULD_CLOSE_READER_NO); - } - - @Override - public JsonParser createParser(File data) throws IOException { - return new IonParser(ionSystem.newReader(new FileInputStream(data)), SHOULD_CLOSE_READER_YES); - } - - @Override - public JsonParser createParser(URL data) throws IOException { - return new IonParser(ionSystem.newReader(data.openStream()), SHOULD_CLOSE_READER_YES); - } -} diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonParser.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonParser.java deleted file mode 100644 index f93568dd43af..000000000000 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonParser.java +++ /dev/null @@ -1,415 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.ion.internal; - -import static com.fasterxml.jackson.core.JsonParser.NumberType.BIG_DECIMAL; -import static com.fasterxml.jackson.core.JsonParser.NumberType.BIG_INTEGER; -import static com.fasterxml.jackson.core.JsonParser.NumberType.DOUBLE; -import static software.amazon.ion.IonType.STRUCT; - -import com.fasterxml.jackson.core.Base64Variant; -import com.fasterxml.jackson.core.JsonLocation; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonStreamContext; -import com.fasterxml.jackson.core.JsonToken; -import com.fasterxml.jackson.core.JsonTokenId; -import com.fasterxml.jackson.core.ObjectCodec; -import com.fasterxml.jackson.core.Version; -import java.io.IOException; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.ion.IonReader; -import software.amazon.ion.IonType; - -@SdkInternalApi -public final class IonParser extends JsonParser { - private final IonReader reader; - private final boolean shouldCloseReader; - private State state = State.BEFORE_VALUE; - private JsonToken currentToken; - private JsonToken lastClearedToken; - private boolean shouldSkipContainer; - private boolean closed; - - public IonParser(IonReader reader, boolean shouldCloseReader) { - 
super(Feature.collectDefaults()); - this.reader = reader; - this.shouldCloseReader = shouldCloseReader; - } - - @Override - public ObjectCodec getCodec() { - throw new UnsupportedOperationException(); - } - - @Override - public void setCodec(ObjectCodec c) { - throw new UnsupportedOperationException(); - } - - @Override - public Version version() { - throw new UnsupportedOperationException(); - } - - @Override - public void close() throws IOException { - if (shouldCloseReader) { - reader.close(); - } else if (Feature.AUTO_CLOSE_SOURCE.enabledIn(_features)) { - reader.close(); - } - closed = true; - } - - @Override - public JsonToken nextToken() { - currentToken = doNextToken(); - return currentToken; - } - - private JsonToken doNextToken() { - for (; ; ) { - switch (state) { - case BEFORE_VALUE: - IonType currentType = reader.next(); - - if (currentType == null) { - boolean topLevel = reader.getDepth() == 0; - if (topLevel) { - state = State.EOF; - continue; - } else { - state = State.END_OF_CONTAINER; - return reader.isInStruct() - ? JsonToken.END_OBJECT - : JsonToken.END_ARRAY; - } - } - - if (reader.isInStruct()) { - state = State.FIELD_NAME; - return JsonToken.FIELD_NAME; - } else { - state = State.VALUE; - return getJsonToken(); - } - - case END_OF_CONTAINER: - reader.stepOut(); - state = State.BEFORE_VALUE; - continue; - - case EOF: - return null; - - case FIELD_NAME: - state = State.VALUE; - return getJsonToken(); - - case VALUE: - state = State.BEFORE_VALUE; - if (IonType.isContainer(reader.getType()) && !reader.isNullValue() && !shouldSkipContainer) { - reader.stepIn(); - } - shouldSkipContainer = false; - continue; - default: - // Ignore. - } - } - } - - @Override - public JsonToken nextValue() { - JsonToken token = nextToken(); - return (token == JsonToken.FIELD_NAME) - ? 
nextToken() - : token; - } - - @Override - public JsonParser skipChildren() { - IonType currentType = reader.getType(); - if (IonType.isContainer(currentType)) { - shouldSkipContainer = true; - currentToken = currentType == STRUCT - ? JsonToken.END_OBJECT - : JsonToken.END_ARRAY; - } - return this; - } - - @Override - public boolean isClosed() { - return closed; - } - - @Override - public JsonToken getCurrentToken() { - return currentToken; - } - - @Override - public int getCurrentTokenId() { - return currentToken == null - ? JsonTokenId.ID_NO_TOKEN - : currentToken.id(); - } - - @Override - public boolean hasCurrentToken() { - return currentToken != null; - } - - @Override - public boolean hasTokenId(int id) { - return getCurrentTokenId() == id; - } - - @Override - public boolean hasToken(JsonToken t) { - return currentToken == t; - } - - @Override - public String getCurrentName() { - return reader.getFieldName(); - } - - @Override - public JsonStreamContext getParsingContext() { - throw new UnsupportedOperationException(); - } - - @Override - public JsonLocation getTokenLocation() { - throw new UnsupportedOperationException(); - } - - @Override - public JsonLocation getCurrentLocation() { - throw new UnsupportedOperationException(); - } - - @Override - public void clearCurrentToken() { - lastClearedToken = currentToken; - currentToken = null; - } - - @Override - public JsonToken getLastClearedToken() { - return lastClearedToken; - } - - @Override - public void overrideCurrentName(String name) { - throw new UnsupportedOperationException(); - } - - @Override - public String getText() { - if (state == State.FIELD_NAME) { - return reader.getFieldName(); - } - if (IonType.isText(reader.getType())) { - return reader.stringValue(); - } - if (currentToken == null) { - // start or end of stream - return null; - } - if (currentToken.isNumeric()) { - return getNumberValue().toString(); - } - return currentToken.asString(); - } - - @Override - public char[] 
getTextCharacters() { - throw new UnsupportedOperationException(); - } - - @Override - public int getTextLength() { - throw new UnsupportedOperationException(); - } - - @Override - public int getTextOffset() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean hasTextCharacters() { - return false; - } - - @Override - public Number getNumberValue() { - NumberType numberType = getNumberType(); - if (numberType == null) { - throw SdkClientException.builder() - .message(String.format("Unable to get number value for non-numeric token %s", - reader.getType())) - .build(); - } - switch (numberType) { - case BIG_DECIMAL: - return reader.bigDecimalValue(); - case BIG_INTEGER: - return reader.bigIntegerValue(); - case DOUBLE: - return reader.doubleValue(); - default: - throw SdkClientException.builder() - .message(String.format("Unable to get number value for number type %s", - numberType)) - .build(); - } - } - - @Override - public NumberType getNumberType() { - switch (reader.getType()) { - case DECIMAL: - return BIG_DECIMAL; - case FLOAT: - return DOUBLE; - case INT: - return BIG_INTEGER; - default: - return null; - } - } - - @Override - public int getIntValue() { - return reader.intValue(); - } - - @Override - public long getLongValue() { - return reader.longValue(); - } - - @Override - public BigInteger getBigIntegerValue() { - return reader.bigIntegerValue(); - } - - @Override - public float getFloatValue() { - return (float) reader.doubleValue(); - } - - @Override - public double getDoubleValue() { - return reader.doubleValue(); - } - - @Override - public BigDecimal getDecimalValue() { - return reader.decimalValue(); - } - - @Override - public Object getEmbeddedObject() { - if (currentToken != JsonToken.VALUE_EMBEDDED_OBJECT) { - return null; - } - IonType currentType = reader.getType(); - switch (currentType) { - case BLOB: - case CLOB: - return ByteBuffer.wrap(reader.newBytes()); - case TIMESTAMP: - return 
reader.timestampValue().dateValue(); - default: - throw SdkClientException.builder() - .message(String.format("Cannot return embedded object for Ion type %s", - currentType)) - .build(); - } - } - - @Override - public byte[] getBinaryValue(Base64Variant bv) { - throw new UnsupportedOperationException(); - } - - @Override - public String getValueAsString(String defaultValue) { - // The documentation is ambiguous about whether field names should - // return their text or the default value. To conform with the - // CBORParser, they will get the default value here. - if (currentToken != JsonToken.VALUE_STRING) { - if (currentToken == null || currentToken == JsonToken.VALUE_NULL || !currentToken.isScalarValue()) { - return defaultValue; - } - } - return getText(); - } - - private JsonToken getJsonToken() { - if (reader.isNullValue()) { - return JsonToken.VALUE_NULL; - } - - IonType currentType = reader.getType(); - switch (currentType) { - case BLOB: - case CLOB: - return JsonToken.VALUE_EMBEDDED_OBJECT; - case BOOL: - return reader.booleanValue() ? 
JsonToken.VALUE_TRUE : JsonToken.VALUE_FALSE; - case DECIMAL: - return JsonToken.VALUE_NUMBER_FLOAT; - case FLOAT: - return JsonToken.VALUE_NUMBER_FLOAT; - case INT: - return JsonToken.VALUE_NUMBER_INT; - case LIST: - return JsonToken.START_ARRAY; - case SEXP: - return JsonToken.START_ARRAY; - case STRING: - return JsonToken.VALUE_STRING; - case STRUCT: - return JsonToken.START_OBJECT; - case SYMBOL: - return JsonToken.VALUE_STRING; - case TIMESTAMP: - return JsonToken.VALUE_EMBEDDED_OBJECT; - default: - throw SdkClientException.builder() - .message(String.format("Unhandled Ion type %s", currentType)) - .build(); - } - } - - private enum State { - BEFORE_VALUE, - END_OF_CONTAINER, - EOF, - FIELD_NAME, - VALUE - } -} diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkIonGenerator.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkIonGenerator.java deleted file mode 100644 index 4d758970d2d2..000000000000 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkIonGenerator.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.ion.internal; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.util.Date; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.protocols.json.StructuredJsonGenerator; -import software.amazon.awssdk.utils.BinaryUtils; -import software.amazon.ion.IonType; -import software.amazon.ion.IonWriter; -import software.amazon.ion.Timestamp; -import software.amazon.ion.system.IonWriterBuilder; - -@SdkInternalApi -public abstract class SdkIonGenerator implements StructuredJsonGenerator { - protected final IonWriter writer; - private final String contentType; - - private SdkIonGenerator(IonWriter writer, String contentType) { - this.writer = writer; - this.contentType = contentType; - } - - public static SdkIonGenerator create(IonWriterBuilder builder, String contentType) { - ByteArrayOutputStream bytes = new ByteArrayOutputStream(); - IonWriter writer = builder.build(bytes); - return new ByteArraySdkIonGenerator(bytes, writer, contentType); - } - - @Override - public StructuredJsonGenerator writeStartArray() { - try { - writer.stepIn(IonType.LIST); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeNull() { - try { - writer.writeNull(); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeEndArray() { - try { - writer.stepOut(); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeStartObject() { - try { - writer.stepIn(IonType.STRUCT); - } catch (IOException e) { - 
throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeEndObject() { - try { - writer.stepOut(); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeFieldName(String fieldName) { - writer.setFieldName(fieldName); - return this; - } - - @Override - public StructuredJsonGenerator writeValue(String val) { - try { - writer.writeString(val); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(boolean bool) { - try { - writer.writeBool(bool); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(long val) { - try { - writer.writeInt(val); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(double val) { - try { - writer.writeFloat(val); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(float val) { - try { - writer.writeFloat(val); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(short val) { - try { - writer.writeInt(val); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(int val) { - try { - writer.writeInt(val); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(ByteBuffer bytes) { - try { - 
writer.writeBlob(BinaryUtils.copyAllBytesFrom(bytes)); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(Instant instant) { - try { - Date d = instant != null ? Date.from(instant) : null; - writer.writeTimestamp(Timestamp.forDateZ(d)); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(BigDecimal value) { - try { - writer.writeDecimal(value); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeValue(BigInteger value) { - try { - writer.writeInt(value); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return this; - } - - @Override - public StructuredJsonGenerator writeNumber(String number) { - return writeValue(new BigDecimal(number)); - } - - @Override - public abstract byte[] getBytes(); - - @Override - public String getContentType() { - return contentType; - } - - private static class ByteArraySdkIonGenerator extends SdkIonGenerator { - private final ByteArrayOutputStream bytes; - - ByteArraySdkIonGenerator(ByteArrayOutputStream bytes, IonWriter writer, String contentType) { - super(writer, contentType); - this.bytes = bytes; - } - - @Override - public byte[] getBytes() { - try { - writer.finish(); - } catch (IOException e) { - throw SdkClientException.builder().cause(e).build(); - } - return bytes.toByteArray(); - } - } -} diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkStructuredIonFactory.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkStructuredIonFactory.java deleted file mode 100644 index 639ea5e60bbe..000000000000 --- 
a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkStructuredIonFactory.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.ion.internal; - -import com.fasterxml.jackson.core.JsonFactory; -import java.util.function.BiFunction; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.protocols.json.StructuredJsonGenerator; -import software.amazon.ion.IonSystem; -import software.amazon.ion.system.IonSystemBuilder; -import software.amazon.ion.system.IonWriterBuilder; - -@SdkInternalApi -abstract class SdkStructuredIonFactory { - - protected static final IonSystem ION_SYSTEM = IonSystemBuilder.standard().build(); - - protected static final JsonFactory JSON_FACTORY = new IonFactory(ION_SYSTEM); - - protected static final IonGeneratorSupplier ION_GENERATOR_SUPPLIER = SdkIonGenerator::create; - - SdkStructuredIonFactory() { - } - - @FunctionalInterface - protected interface IonGeneratorSupplier extends BiFunction { - StructuredJsonGenerator apply(IonWriterBuilder writerBuilder, String contentType); - } -} diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/AwsStructuredIonFactoryTest.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/AwsStructuredIonFactoryTest.java deleted file mode 100644 index 
2b2fab841717..000000000000 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/AwsStructuredIonFactoryTest.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.ion; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assert.assertEquals; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.awscore.exception.AwsServiceException; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.http.AbortableInputStream; -import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.core.ExceptionMetadata; -import software.amazon.awssdk.protocols.json.JsonOperationMetadata; -import software.amazon.ion.IonStruct; -import software.amazon.ion.IonSystem; -import software.amazon.ion.IonWriter; -import software.amazon.ion.Timestamp; -import software.amazon.ion.system.IonSystemBuilder; - -public class AwsStructuredIonFactoryTest { - private static final String ERROR_PREFIX = "aws-type:"; - private static final String ERROR_TYPE = "InvalidParameterException"; - private static final String ERROR_MESSAGE = "foo"; - - private static final String 
NO_CUSTOM_ERROR_CODE_FIELD_NAME = null; - - private static IonSystem system; - - @BeforeClass - public static void beforeClass() { - system = IonSystemBuilder.standard().build(); - } - - private static IonStruct createPayload() { - IonStruct payload = system.newEmptyStruct(); - payload.add("NotValidJson", system.newTimestamp(Timestamp.nowZ())); - payload.add("errorMessage", system.newString(ERROR_MESSAGE)); - return payload; - } - - private static SdkHttpFullResponse createResponse(IonStruct payload) throws Exception { - ByteArrayOutputStream bytes = new ByteArrayOutputStream(); - IonWriter writer = system.newBinaryWriter(bytes); - payload.writeTo(writer); - writer.close(); - - return ValidSdkObjects.sdkHttpFullResponse() - .content(AbortableInputStream.create(new ByteArrayInputStream(bytes.toByteArray()))) - .build(); - } - - @Test - public void handlesErrorsUsingHttpHeader() throws Exception { - IonStruct payload = createPayload(); - - SdkHttpFullResponse error = - createResponse(payload).toBuilder().putHeader("x-amzn-ErrorType", ERROR_TYPE).build(); - - AwsServiceException exception = handleError(error); - assertThat(exception).isInstanceOf(InvalidParameterException.class); - assertEquals(ERROR_MESSAGE, exception.awsErrorDetails().errorMessage()); - } - - @Test - public void handlesErrorsUsingMagicField() throws Exception { - IonStruct payload = createPayload(); - payload.add("__type", system.newString(ERROR_TYPE)); - - SdkHttpFullResponse error = createResponse(payload); - - AwsServiceException exception = handleError(error); - assertThat(exception).isInstanceOf(InvalidParameterException.class); - assertEquals(ERROR_MESSAGE, exception.awsErrorDetails().errorMessage()); - } - - @Test - public void handlesErrorsUsingAnnotation() throws Exception { - IonStruct payload = createPayload(); - payload.addTypeAnnotation(ERROR_PREFIX + ERROR_TYPE); - - SdkHttpFullResponse error = createResponse(payload); - - AwsServiceException exception = handleError(error); - 
assertThat(exception).isInstanceOf(InvalidParameterException.class); - assertEquals(ERROR_MESSAGE, exception.awsErrorDetails().errorMessage()); - } - - @Test(expected = SdkClientException.class) - public void rejectPayloadsWithMultipleErrorAnnotations() throws Exception { - IonStruct payload = createPayload(); - payload.addTypeAnnotation(ERROR_PREFIX + ERROR_TYPE); - payload.addTypeAnnotation(ERROR_PREFIX + "foo"); - - SdkHttpFullResponse error = createResponse(payload); - - handleError(error); - } - - @Test - public void handlesErrorsWithMutipleAnnotations() throws Exception { - IonStruct payload = createPayload(); - payload.addTypeAnnotation("foo"); - payload.addTypeAnnotation(ERROR_PREFIX + ERROR_TYPE); - payload.addTypeAnnotation("bar"); - - SdkHttpFullResponse error = createResponse(payload); - - AwsServiceException exception = handleError(error); - assertThat(exception).isInstanceOf(InvalidParameterException.class); - assertEquals(ERROR_MESSAGE, exception.awsErrorDetails().errorMessage()); - } - - private AwsServiceException handleError(SdkHttpFullResponse error) throws Exception { - return AwsIonProtocolFactory.builder() - .registerModeledException( - ExceptionMetadata.builder() - .exceptionBuilderSupplier(InvalidParameterException::builder) - .errorCode(ERROR_TYPE) - .build()) - .customErrorCodeFieldName(NO_CUSTOM_ERROR_CODE_FIELD_NAME) - .build() - .createErrorResponseHandler(JsonOperationMetadata.builder() - .hasStreamingSuccessResponse(false) - .isPayloadJson(true) - .build()) - .handle(error, new ExecutionAttributes()); - } - - private static class InvalidParameterException extends AwsServiceException { - private static final long serialVersionUID = 0; - - public InvalidParameterException(BeanStyleBuilder builder) { - super(builder); - } - - public static Class serializableBuilderClass() { - return BeanStyleBuilder.class; - } - - @Override - public Builder toBuilder() { - return new BeanStyleBuilder(this); - } - - public static Builder builder() { - 
return new BeanStyleBuilder(); - } - - public interface Builder extends AwsServiceException.Builder { - @Override - Builder message(String message); - - @Override - InvalidParameterException build(); - } - - private static class BeanStyleBuilder extends BuilderImpl implements Builder { - private String message; - - private BeanStyleBuilder() {} - - private BeanStyleBuilder(InvalidParameterException ex) { - this.message = ex.getMessage(); - } - - @Override - public Builder message(String message) { - this.message = message; - return this; - } - - public void setMessage(String message) { - this.message = message; - } - - @Override - public InvalidParameterException build() { - return new InvalidParameterException(this); - } - } - } -} diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonFactoryTest.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonFactoryTest.java deleted file mode 100644 index 31c9b82298a3..000000000000 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonFactoryTest.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.ion; - -import static org.hamcrest.Matchers.instanceOf; -import static org.junit.Assert.assertThat; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.net.URL; -import java.net.URLConnection; -import java.net.URLStreamHandler; -import org.junit.Before; -import org.junit.Test; -import software.amazon.awssdk.protocols.ion.internal.IonFactory; -import software.amazon.awssdk.protocols.ion.internal.IonParser; -import software.amazon.ion.system.IonSystemBuilder; - -public class IonFactoryTest { - private IonFactory factory; - - @Before - public void before() { - factory = new IonFactory(IonSystemBuilder.standard().build()); - } - - @Test - public void createParserFromByteArray() throws Exception { - assertThat(factory.createParser(new byte[0]), instanceOf(IonParser.class)); - } - - @Test(expected = UnsupportedOperationException.class) - public void createParserFromCharArray() throws Exception { - assertThat(factory.createParser(new char[0]), instanceOf(IonParser.class)); - } - - @Test - public void createParserFromFile() throws Exception { - File file = File.createTempFile("IonFactoryTest", null); - file.deleteOnExit(); - assertThat(factory.createParser(file), instanceOf(IonParser.class)); - } - - @Test - public void createParserFromInputStream() throws Exception { - assertThat(factory.createParser(new ByteArrayInputStream(new byte[0])), instanceOf(IonParser.class)); - } - - @Test - public void createParserFromReader() throws Exception { - assertThat(factory.createParser(new InputStreamReader(new ByteArrayInputStream(new byte[0]))), instanceOf(IonParser.class)); - } - - @Test - public void createParserFromString() throws Exception { - assertThat(factory.createParser(""), instanceOf(IonParser.class)); - } - - @Test - public void createParserFromUrl() throws Exception { - class NullUrlConnection extends 
URLConnection { - protected NullUrlConnection(URL url) { - super(url); - } - - @Override - public void connect() throws IOException { - } - - @Override - public InputStream getInputStream() { - return new ByteArrayInputStream(new byte[0]); - } - } - ; - - class NullUrlStreamHandler extends URLStreamHandler { - @Override - protected URLConnection openConnection(URL u) throws IOException { - return new NullUrlConnection(u); - } - } - ; - - assertThat(factory.createParser(new URL("foo", "bar", 99, "baz", new NullUrlStreamHandler())), instanceOf(IonParser.class)); - } - - @Test - public void createParserFromByteArrayWithOffsetAndLength() throws Exception { - assertThat(factory.createParser(new byte[0], 0, 0), instanceOf(IonParser.class)); - } - - @Test(expected = UnsupportedOperationException.class) - public void createParserFromCharArrayWithOffsetAndLength() throws Exception { - factory.createParser(new char[0], 0, 0); - } -} diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonParserTest.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonParserTest.java deleted file mode 100644 index ca829c900c83..000000000000 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonParserTest.java +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.ion; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.protocols.ion.internal.IonParser; -import software.amazon.awssdk.protocols.ion.internal.SdkIonGenerator; -import software.amazon.ion.IonReader; -import software.amazon.ion.IonSystem; -import software.amazon.ion.IonWriter; -import software.amazon.ion.system.IonBinaryWriterBuilder; -import software.amazon.ion.system.IonSystemBuilder; -import software.amazon.ion.system.IonTextWriterBuilder; - -/** - * Tests the {@link IonParser} for conformity with the {@link JsonParser} API. - * Also tests that the IonParser correctly converts Ion-only value types to - * the correct {@link JsonToken}s. For testing of additional value types and - * roundtrip testing with the {@link SdkIonGenerator}, see {@link IonRoundtripTest}. 
- */ -@RunWith(Parameterized.class) -public class IonParserTest { - - private static IonSystem SYSTEM = IonSystemBuilder.standard().build(); - @Rule - public ExpectedException thrown = ExpectedException.none(); - private WriteFormat format; - - public IonParserTest(WriteFormat format) { - this.format = format; - } - - @Parameters - public static Collection data() { - List parameters = new ArrayList(); - for (WriteFormat format : WriteFormat.values()) { - parameters.add(new Object[] {format}); - } - return parameters; - } - - private IonParser parse(String data) throws IOException { - byte[] ion = format.write(data); - return new IonParser(SYSTEM.newReader(ion), false); - } - - @Test - public void testEmptySexp() throws IOException { - IonParser parser = parse("()"); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - assertNull(parser.nextToken()); - } - - @Test - public void testSexp() throws IOException { - IonParser parser = parse("(a+b)"); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("a", parser.getText()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("+", parser.getText()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("b", parser.getText()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - assertNull(parser.nextToken()); - } - - @Test - public void testNestedSexp() throws IOException { - IonParser parser = parse("((a)+(b))"); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("a", parser.getText()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("+", parser.getText()); - assertEquals(JsonToken.START_ARRAY, 
parser.nextToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("b", parser.getText()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - assertNull(parser.nextToken()); - } - - @Test - public void testSexpSkip() throws IOException { - IonParser parser = parse("(a+b)"); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - parser.skipChildren(); - assertEquals(JsonToken.END_ARRAY, parser.getCurrentToken()); - assertNull(parser.nextToken()); - } - - @Test - public void testNestedSexpSkip() throws IOException { - IonParser parser = parse("((a)+(b))"); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - parser.skipChildren(); - assertEquals(JsonToken.END_ARRAY, parser.getCurrentToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("+", parser.getText()); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - parser.skipChildren(); - assertEquals(JsonToken.END_ARRAY, parser.getCurrentToken()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - assertNull(parser.nextToken()); - } - - @Test - public void testEmptyClob() throws IOException { - IonParser parser = parse("{{}}"); - assertEquals(JsonToken.VALUE_EMBEDDED_OBJECT, parser.nextToken()); - assertEquals(ByteBuffer.wrap(new byte[0]), parser.getEmbeddedObject()); - assertNull(parser.nextToken()); - } - - @Test - public void testClob() throws IOException { - IonParser parser = parse("{{\"abc123\"}}"); - assertEquals(JsonToken.VALUE_EMBEDDED_OBJECT, parser.nextToken()); - assertEquals(ByteBuffer.wrap("abc123".getBytes(StandardCharsets.UTF_8)), parser.getEmbeddedObject()); - assertNull(parser.nextToken()); - } - - @Test - public void testSymbolValue() throws IOException { - IonParser parser = parse("a1 _1 $foo '123' 'sp ace'"); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("a1", 
parser.getText()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("_1", parser.getText()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("$foo", parser.getText()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("123", parser.getText()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("sp ace", parser.getText()); - assertNull(parser.nextToken()); - } - - @Test - public void testSkipChildrenNotAtContainerStartDoesNothing() throws IOException { - IonParser parser = parse("123 (a+b)"); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - parser.skipChildren(); // should do nothing - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.getCurrentToken()); - assertEquals(123, parser.getIntValue()); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - parser.skipChildren(); // should do nothing - assertEquals(JsonToken.VALUE_STRING, parser.getCurrentToken()); - assertEquals("a", parser.getText()); - } - - @Test - public void testGetEmbeddedObjectOnBasicValueReturnsNull() throws IOException { - IonParser parser = parse("123 (a+b) abc"); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertNull(parser.getEmbeddedObject()); - assertEquals(123, parser.getIntValue()); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertNull(parser.getEmbeddedObject()); - parser.skipChildren(); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertNull(parser.getEmbeddedObject()); - assertEquals("abc", parser.getText()); - assertNull(parser.nextToken()); - } - - @Test - public void testNulls() throws IOException { - IonParser parser = parse("null " - + "null.null " - + "null.bool " - + "null.int " - + "null.float " - + "null.decimal " - + "null.timestamp " - + "null.string " - + "null.symbol " - + "null.blob " - + "null.clob " - + "null.struct " - + "null.list " 
- + "null.sexp" - ); - JsonToken token = null; - int count = 0; - while ((token = parser.nextToken()) != null) { - assertEquals(JsonToken.VALUE_NULL, token); - count++; - } - assertEquals(14, count); - } - - @Test - public void testNextValue() throws IOException { - IonParser parser = parse("{foo:{bar:\"abc\"}, baz:123} 42.0"); - assertEquals(JsonToken.START_OBJECT, parser.nextValue()); - assertEquals(JsonToken.START_OBJECT, parser.nextValue()); - assertEquals("foo", parser.getCurrentName()); - assertEquals(JsonToken.VALUE_STRING, parser.nextValue()); - assertEquals("abc", parser.getText()); - assertEquals("bar", parser.getCurrentName()); - assertEquals(JsonToken.END_OBJECT, parser.nextValue()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextValue()); - assertEquals(123, parser.getIntValue()); - assertEquals("baz", parser.getCurrentName()); - assertEquals(JsonToken.END_OBJECT, parser.nextValue()); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextValue()); - assertEquals(42.0, parser.getFloatValue(), 1e-9); - assertNull(parser.nextValue()); - } - - @Test - public void testGetCurrentNameNotAtFieldReturnsNull() throws IOException { - IonParser parser = parse("{foo:\"abc\"} [a, b] {{}} \"bar\""); - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - assertNull(parser.getCurrentName()); - assertEquals(JsonToken.VALUE_STRING, parser.nextValue()); - assertEquals(JsonToken.END_OBJECT, parser.nextToken()); - assertNull(parser.getCurrentName()); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertNull(parser.getCurrentName()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertNull(parser.getCurrentName()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - assertNull(parser.getCurrentName()); - assertEquals(JsonToken.VALUE_EMBEDDED_OBJECT, parser.nextToken()); - assertNull(parser.getCurrentName()); - assertEquals(JsonToken.VALUE_STRING, 
parser.nextToken()); - assertNull(parser.getCurrentName()); - assertNull(parser.nextToken()); - assertNull(parser.getCurrentName()); - } - - @Test - public void testClearCurrentToken() throws IOException { - IonParser parser = parse("{}"); - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - parser.clearCurrentToken(); - assertNull(parser.getCurrentToken()); - assertFalse(parser.hasCurrentToken()); - assertEquals(JsonToken.START_OBJECT, parser.getLastClearedToken()); - } - - @Test - public void testGetText() throws IOException { - String defaultText = "default"; - String integer = String.valueOf(123); - String flt = String.valueOf(42.0); - IonParser parser = parse("{foo:" + integer + ", bar:" + flt + "} {{\"abc\"}} null true false"); - assertNull(parser.getText()); - assertEquals(defaultText, parser.getValueAsString(defaultText)); - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - assertEquals(JsonToken.START_OBJECT.asString(), parser.getText()); // "{" - assertEquals(defaultText, parser.getValueAsString(defaultText)); - assertEquals(JsonToken.FIELD_NAME, parser.nextToken()); - assertEquals("foo", parser.getText()); - assertEquals(defaultText, parser.getValueAsString(defaultText)); - assertEquals("foo", parser.getCurrentName()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(integer, parser.getText()); - assertEquals(integer, parser.getValueAsString(defaultText)); - assertEquals(123, parser.getIntValue()); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextValue()); - assertEquals(flt, parser.getText()); - assertEquals(flt, parser.getValueAsString(defaultText)); - assertEquals(42.0, parser.getFloatValue(), 1e-9); - assertEquals("bar", parser.getCurrentName()); - assertEquals(JsonToken.END_OBJECT, parser.nextToken()); - assertEquals(JsonToken.END_OBJECT.asString(), parser.getText()); // "}" - assertEquals(defaultText, parser.getValueAsString(defaultText)); - assertEquals(JsonToken.VALUE_EMBEDDED_OBJECT, 
parser.nextToken()); - assertNull(parser.getText()); // embedded objects have undefined text - assertEquals(JsonToken.VALUE_NULL, parser.nextToken()); - assertEquals(JsonToken.VALUE_NULL.asString(), parser.getText()); // "null" - assertEquals(defaultText, parser.getValueAsString(defaultText)); - assertEquals(JsonToken.VALUE_TRUE, parser.nextToken()); - assertEquals(JsonToken.VALUE_TRUE.asString(), parser.getText()); // "true" - assertEquals(JsonToken.VALUE_TRUE.asString(), parser.getValueAsString(defaultText)); - assertEquals(JsonToken.VALUE_FALSE, parser.nextToken()); - assertEquals(JsonToken.VALUE_FALSE.asString(), parser.getText()); // "false" - assertEquals(JsonToken.VALUE_FALSE.asString(), parser.getValueAsString(defaultText)); - assertNull(parser.nextToken()); - assertNull(parser.getText()); - assertEquals(defaultText, parser.getValueAsString(defaultText)); - } - - @Test - public void testGetNumberValue() throws IOException { - String integer = String.valueOf(Integer.MAX_VALUE); - String lng = String.valueOf(Long.MAX_VALUE); - String bigInteger = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE).toString(); - String flt = String.valueOf(Float.MAX_VALUE); - String dbl = String.valueOf(Double.MAX_VALUE); - String inf = "1.7976931348623157E309"; // Double.MAX_VALUE * 10; - String bigDecimal = new BigDecimal(inf).toString(); - IonParser parser = parse(integer + " " - + lng + " " - + bigInteger + " " - + flt + " " - + dbl + " " - + inf + " " - + bigDecimal.toLowerCase().replace("e", "D") - ); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(integer, parser.getNumberValue().toString()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(lng, parser.getNumberValue().toString()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(bigInteger, parser.getNumberValue().toString()); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(flt, 
parser.getNumberValue().toString()); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(dbl, parser.getNumberValue().toString()); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertTrue(Double.isInfinite(parser.getDoubleValue())); - assertTrue(Double.isInfinite(parser.getFloatValue())); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(bigDecimal, parser.getNumberValue().toString()); - } - - @Test - public void testGetNumberValueNotOnNumberFails() throws IOException { - IonParser parser = parse("foo {{}} {abc:123}"); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertNull(parser.getNumberType()); - thrown.expect(SdkClientException.class); - parser.getNumberValue(); - } - - @Test - public void testSpecialFloatValues() throws IOException { - IonParser parser = parse("1.7976931348623157E309 " // Double.MAX_VALUE * 10 - + "-1.7976931348623157E309 " - + "+inf " - + "-inf " - + "nan" - ); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertTrue(Double.isInfinite(parser.getDoubleValue())); - assertTrue(Double.isInfinite(parser.getFloatValue())); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertTrue(Double.isInfinite(parser.getDoubleValue())); - assertTrue(Double.isInfinite(parser.getFloatValue())); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertTrue(Double.isInfinite(parser.getDoubleValue())); - assertTrue(Double.isInfinite(parser.getFloatValue())); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertTrue(Double.isInfinite(parser.getDoubleValue())); - assertTrue(Double.isInfinite(parser.getFloatValue())); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertTrue(Double.isNaN(parser.getDoubleValue())); - assertTrue(Double.isNaN(parser.getFloatValue())); - } - - private enum WriteFormat { - TEXT { - @Override - public byte[] write(String data) throws 
IOException { - ByteArrayOutputStream out = new ByteArrayOutputStream(); - write(data, IonTextWriterBuilder.standard().build(out)); - return out.toByteArray(); - } - }, - BINARY { - @Override - public byte[] write(String data) throws IOException { - ByteArrayOutputStream out = new ByteArrayOutputStream(); - write(data, IonBinaryWriterBuilder.standard().build(out)); - return out.toByteArray(); - } - }; - - public static void write(String data, IonWriter writer) throws IOException { - IonReader reader = SYSTEM.newReader(data); - writer.writeValues(reader); - writer.close(); - } - - public abstract byte[] write(String data) throws IOException; - } - -} diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonRoundtripTest.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonRoundtripTest.java deleted file mode 100644 index ba5429205563..000000000000 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonRoundtripTest.java +++ /dev/null @@ -1,504 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.ion; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import com.fasterxml.jackson.core.JsonToken; -import java.io.IOException; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.List; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import software.amazon.awssdk.protocols.ion.internal.IonParser; -import software.amazon.awssdk.protocols.ion.internal.SdkIonGenerator; -import software.amazon.ion.IonException; -import software.amazon.ion.IonSystem; -import software.amazon.ion.system.IonBinaryWriterBuilder; -import software.amazon.ion.system.IonSystemBuilder; - -/** - * Tests that data written by the {@link SdkIonGenerator} is correctly read - * by the {@link IonParser}. For additional stand-alone testing of the - * {@link IonParser}, see {@link IonParserTest}. 
- */ -@RunWith(Parameterized.class) -public class IonRoundtripTest { - - private static final IonSystem SYSTEM = IonSystemBuilder.standard().build(); - private final Data data; - - public IonRoundtripTest(Data data) { - this.data = data; - } - - @Parameters - public static Collection data() { - List parameters = new ArrayList(); - for (Data data : Data.values()) { - parameters.add(new Object[] {data}); - } - return parameters; - } - - @Test - public void testRoundtrip() throws IOException { - SdkIonGenerator generator = SdkIonGenerator.create(IonBinaryWriterBuilder.standard(), "foo"); - data.generate(generator); - IonParser parser = new IonParser(SYSTEM.newReader(generator.getBytes()), false); - data.parse(parser); - assertNull(parser.nextToken()); // Asserts data was read fully. - assertFalse(parser.hasCurrentToken()); - assertFalse(parser.isClosed()); - parser.close(); - assertTrue(parser.isClosed()); - } - - private enum Data { - NULL { - @Override - public void generate(SdkIonGenerator generator) { - // Is this the only way to write a null value? 
- generator.writeValue((String) null); - generator.writeValue((BigInteger) null); - generator.writeValue((BigDecimal) null); - generator.writeValue((Instant) null); - generator.writeValue((ByteBuffer) null); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NULL, parser.nextToken()); - assertEquals(JsonToken.VALUE_NULL, parser.nextToken()); - assertEquals(JsonToken.VALUE_NULL, parser.nextToken()); - assertEquals(JsonToken.VALUE_NULL, parser.nextToken()); - assertEquals(JsonToken.VALUE_NULL, parser.nextToken()); - } - }, - BOOL { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(true); - generator.writeValue(false); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_TRUE, parser.nextToken()); - assertEquals(true, parser.getBooleanValue()); - assertEquals(JsonToken.VALUE_FALSE, parser.nextToken()); - assertEquals(false, parser.getBooleanValue()); - } - }, - SHORT { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(0); - // There's no writeValue(byte) method, but there is writeValue(short)... 
- generator.writeValue(Byte.MAX_VALUE); - generator.writeValue(Byte.MIN_VALUE); - generator.writeValue(Short.MAX_VALUE); - generator.writeValue(Short.MIN_VALUE); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(0, parser.getIntValue()); - assertEquals(0, parser.getLongValue()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(Byte.MAX_VALUE, parser.getByteValue()); - assertEquals((short) Byte.MAX_VALUE, parser.getShortValue()); - assertEquals((int) Byte.MAX_VALUE, parser.getIntValue()); - assertEquals((long) Byte.MAX_VALUE, parser.getLongValue()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(Byte.MIN_VALUE, parser.getByteValue()); - assertEquals((short) Byte.MIN_VALUE, parser.getShortValue()); - assertEquals((int) Byte.MIN_VALUE, parser.getIntValue()); - assertEquals((long) Byte.MIN_VALUE, parser.getLongValue()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(Short.MAX_VALUE, parser.getShortValue()); - assertEquals((int) Short.MAX_VALUE, parser.getIntValue()); - assertEquals((long) Short.MAX_VALUE, parser.getLongValue()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(Short.MIN_VALUE, parser.getShortValue()); - assertEquals((int) Short.MIN_VALUE, parser.getIntValue()); - assertEquals((long) Short.MIN_VALUE, parser.getLongValue()); - - } - }, - INT { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(Integer.MAX_VALUE); - generator.writeValue(Integer.MIN_VALUE); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(Integer.MAX_VALUE, parser.getIntValue()); - assertEquals((long) Integer.MAX_VALUE, parser.getLongValue()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - 
assertEquals(Integer.MIN_VALUE, parser.getIntValue()); - assertEquals((long) Integer.MIN_VALUE, parser.getLongValue()); - } - }, - LONG { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(Long.MAX_VALUE); - generator.writeValue(Long.MIN_VALUE); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(Long.MAX_VALUE, parser.getLongValue()); - assertEquals(BigInteger.valueOf(Long.MAX_VALUE), parser.getBigIntegerValue()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(Long.MIN_VALUE, parser.getLongValue()); - assertEquals(BigInteger.valueOf(Long.MIN_VALUE), parser.getBigIntegerValue()); - - } - }, - BIG_INTEGER { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE)); - generator.writeValue(BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.ONE)); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE), parser.getBigIntegerValue()); - try { - parser.getLongValue(); - } catch (IonException e1) { - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.ONE), parser.getBigIntegerValue()); - try { - parser.getLongValue(); - } catch (IonException e2) { - return; - } - } - throw new AssertionError("number shouldn't fit in a long"); - } - }, - FLOAT { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(Float.MAX_VALUE); - generator.writeValue(Float.MIN_VALUE); - generator.writeValue(-Float.MAX_VALUE); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - 
assertEquals(Float.MAX_VALUE, parser.getFloatValue(), 1e-9); - assertEquals((double) Float.MAX_VALUE, parser.getDoubleValue(), 1e-9); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(Float.MIN_VALUE, parser.getFloatValue(), 1e-9); - assertEquals((double) Float.MIN_VALUE, parser.getDoubleValue(), 1e-9); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(-Float.MAX_VALUE, parser.getFloatValue(), 1e-9); - assertEquals((double) -Float.MAX_VALUE, parser.getDoubleValue(), 1e-9); - - } - }, - DOUBLE { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(Double.MAX_VALUE); - generator.writeValue(Double.MIN_VALUE); - generator.writeValue(-Double.MAX_VALUE); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(Double.MAX_VALUE, parser.getDoubleValue(), 1e-9); - assertEquals(BigDecimal.valueOf(Double.MAX_VALUE), parser.getDecimalValue()); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(Float.MIN_VALUE, parser.getDoubleValue(), 1e-9); - assertEquals(BigDecimal.valueOf(Double.MIN_VALUE), parser.getDecimalValue()); - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(-Double.MAX_VALUE, parser.getDoubleValue(), 1e-9); - assertEquals(BigDecimal.valueOf(-Double.MAX_VALUE), parser.getDecimalValue()); - - } - }, - BIG_DECIMAL { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(BigDecimal.valueOf(Double.MAX_VALUE).add(BigDecimal.ONE)); - generator.writeValue(BigDecimal.valueOf(-Double.MAX_VALUE).subtract(BigDecimal.ONE)); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(BigDecimal.valueOf(Double.MAX_VALUE).add(BigDecimal.ONE), parser.getDecimalValue()); - 
assertEquals(JsonToken.VALUE_NUMBER_FLOAT, parser.nextToken()); - assertEquals(BigDecimal.valueOf(-Double.MAX_VALUE).subtract(BigDecimal.ONE), parser.getDecimalValue()); - } - }, - TIMESTAMP { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(Instant.ofEpochMilli(0)); - // Note: dates too far in the future are rejected by Ion - generator.writeValue(Instant.ofEpochMilli(Integer.MAX_VALUE)); - generator.writeValue(Instant.ofEpochMilli(Integer.MIN_VALUE)); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_EMBEDDED_OBJECT, parser.nextToken()); - assertEquals(new Date(0), parser.getEmbeddedObject()); - assertEquals(JsonToken.VALUE_EMBEDDED_OBJECT, parser.nextToken()); - assertEquals(new Date(Integer.MAX_VALUE), parser.getEmbeddedObject()); - assertEquals(JsonToken.VALUE_EMBEDDED_OBJECT, parser.nextToken()); - assertEquals(new Date(Integer.MIN_VALUE), parser.getEmbeddedObject()); - } - }, - BYTES { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(ByteBuffer.wrap("foobar".getBytes(StandardCharsets.UTF_8))); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_EMBEDDED_OBJECT, parser.nextToken()); - assertEquals(ByteBuffer.wrap("foobar".getBytes(StandardCharsets.UTF_8)), parser.getEmbeddedObject()); - } - }, - EMPTY_STRUCT { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeStartObject(); - generator.writeEndObject(); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - assertEquals(JsonToken.END_OBJECT, parser.nextToken()); - } - }, - EMPTY_LIST { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeStartArray(); - generator.writeEndArray(); - } - - @Override - public void parse(IonParser parser) throws IOException { - 
assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - } - }, - STRUCT { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeStartObject(); - generator.writeFieldName("int"); - generator.writeValue(1); - generator.writeFieldName("string"); - generator.writeValue("foo"); - generator.writeFieldName("bool"); - generator.writeValue(false); - generator.writeEndObject(); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - assertEquals(JsonToken.FIELD_NAME, parser.nextToken()); - assertEquals("int", parser.getText()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(1, parser.getIntValue()); - assertEquals(JsonToken.FIELD_NAME, parser.nextToken()); - assertEquals("string", parser.getText()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("foo", parser.getText()); - assertEquals(JsonToken.FIELD_NAME, parser.nextToken()); - assertEquals("bool", parser.getText()); - assertEquals(JsonToken.VALUE_FALSE, parser.nextToken()); - assertEquals(false, parser.getBooleanValue()); - assertEquals(JsonToken.END_OBJECT, parser.nextToken()); - } - }, - LIST { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeStartArray(); - generator.writeValue(1); - generator.writeValue("foo"); - generator.writeValue(true); - generator.writeEndArray(); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(1, parser.getIntValue()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("foo", parser.getText()); - assertEquals(JsonToken.VALUE_TRUE, parser.nextToken()); - assertEquals(true, parser.getBooleanValue()); - assertEquals(JsonToken.END_ARRAY, 
parser.nextToken()); - - } - }, - STRUCT_IN_LIST { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeStartArray(); - STRUCT.generate(generator); - generator.writeEndArray(); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - STRUCT.parse(parser); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - } - }, - LIST_IN_STRUCT { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeStartObject(); - generator.writeFieldName("list"); - LIST.generate(generator); - generator.writeEndObject(); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - assertEquals(JsonToken.FIELD_NAME, parser.nextToken()); - assertEquals("list", parser.getText()); - LIST.parse(parser); - assertEquals(JsonToken.END_OBJECT, parser.nextToken()); - } - }, - STRUCT_SKIP { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(42); - STRUCT.generate(generator); - generator.writeValue("foo"); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(42, parser.getIntValue()); - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - parser.skipChildren(); - assertEquals(JsonToken.END_OBJECT, parser.getCurrentToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("foo", parser.getText()); - } - }, - LIST_SKIP { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(42); - LIST.generate(generator); - generator.writeValue("foo"); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(42, parser.getIntValue()); - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - 
parser.skipChildren(); - assertEquals(JsonToken.END_ARRAY, parser.getCurrentToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("foo", parser.getText()); - } - }, - NESTED_SKIP { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeValue(42); - LIST_IN_STRUCT.generate(generator); - generator.writeValue("foo"); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(42, parser.getIntValue()); - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - parser.skipChildren(); - assertEquals(JsonToken.END_OBJECT, parser.getCurrentToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("foo", parser.getText()); - } - }, - NESTED_INNER_SKIP { - @Override - public void generate(SdkIonGenerator generator) { - generator.writeStartArray(); - generator.writeValue(42); - STRUCT.generate(generator); - generator.writeValue("foo"); - generator.writeEndArray(); - } - - @Override - public void parse(IonParser parser) throws IOException { - assertEquals(JsonToken.START_ARRAY, parser.nextToken()); - assertEquals(JsonToken.VALUE_NUMBER_INT, parser.nextToken()); - assertEquals(42, parser.getIntValue()); - assertEquals(JsonToken.START_OBJECT, parser.nextToken()); - parser.skipChildren(); - assertEquals(JsonToken.END_OBJECT, parser.getCurrentToken()); - assertEquals(JsonToken.VALUE_STRING, parser.nextToken()); - assertEquals("foo", parser.getText()); - assertEquals(JsonToken.END_ARRAY, parser.nextToken()); - } - }; - - public abstract void generate(SdkIonGenerator generator); - - public abstract void parse(IonParser parser) throws IOException; - } - -} diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/ValidSdkObjects.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/ValidSdkObjects.java deleted file mode 100644 
index eabb349822ba..000000000000 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/ValidSdkObjects.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.ion; - -import java.net.URI; -import software.amazon.awssdk.http.SdkHttpFullRequest; -import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.http.SdkHttpMethod; - -/** - * A collection of objects (or object builder) pre-populated with all required fields. This allows tests to focus on what data - * they care about, not necessarily what data is required. 
- */ -public final class ValidSdkObjects { - private ValidSdkObjects() {} - - public static SdkHttpFullRequest.Builder sdkHttpFullRequest() { - return SdkHttpFullRequest.builder() - .uri(URI.create("http://test.com:80")) - .method(SdkHttpMethod.GET); - } - - public static SdkHttpFullResponse.Builder sdkHttpFullResponse() { - return SdkHttpFullResponse.builder() - .statusCode(200); - } -} diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index 7392c9522eb1..c79162caaf54 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 @@ -62,13 +62,14 @@ ${awsjavasdk.version} - com.fasterxml.jackson.core - jackson-core + software.amazon.awssdk + third-party-jackson-core + ${awsjavasdk.version} - com.fasterxml.jackson.core - jackson-databind - test + software.amazon.awssdk + json-utils + ${awsjavasdk.version} junit diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java index 2c2df900eadf..c42911e15e0c 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java @@ -41,13 +41,13 @@ import software.amazon.awssdk.protocols.core.OperationInfo; import software.amazon.awssdk.protocols.core.ProtocolMarshaller; import software.amazon.awssdk.protocols.json.internal.AwsStructuredPlainJsonFactory; -import software.amazon.awssdk.protocols.json.internal.dom.JsonDomParser; import software.amazon.awssdk.protocols.json.internal.marshall.JsonProtocolMarshallerBuilder; import 
software.amazon.awssdk.protocols.json.internal.unmarshall.AwsJsonErrorMessageParser; import software.amazon.awssdk.protocols.json.internal.unmarshall.AwsJsonProtocolErrorUnmarshaller; import software.amazon.awssdk.protocols.json.internal.unmarshall.AwsJsonResponseHandler; import software.amazon.awssdk.protocols.json.internal.unmarshall.JsonProtocolUnmarshaller; import software.amazon.awssdk.protocols.json.internal.unmarshall.JsonResponseHandler; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; @SdkProtectedApi public abstract class BaseAwsJsonProtocolFactory { @@ -72,7 +72,9 @@ protected BaseAwsJsonProtocolFactory(Builder builder) { this.clientConfiguration = builder.clientConfiguration; this.protocolUnmarshaller = JsonProtocolUnmarshaller .builder() - .parser(JsonDomParser.create(getSdkFactory().getJsonFactory())) + .parser(JsonNodeParser.builder() + .jsonFactory(getSdkFactory().getJsonFactory()) + .build()) .defaultTimestampFormats(getDefaultTimestampFormats()) .build(); } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsStructuredJsonFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsStructuredJsonFactory.java index a27196287e4c..c1b6ca5deb45 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsStructuredJsonFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsStructuredJsonFactory.java @@ -15,9 +15,9 @@ package software.amazon.awssdk.protocols.json; -import com.fasterxml.jackson.core.JsonFactory; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.protocols.json.internal.unmarshall.JsonErrorCodeParser; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; /** * Generic implementation of a structured JSON factory that is pluggable for different variants of diff --git 
a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContent.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContent.java index bb5edba9ee11..86f58e9049d0 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContent.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContent.java @@ -15,16 +15,14 @@ package software.amazon.awssdk.protocols.json; -import com.fasterxml.jackson.core.JsonFactory; -import java.io.ByteArrayInputStream; import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.json.internal.dom.JsonDomParser; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; -import software.amazon.awssdk.protocols.json.internal.dom.SdkObjectNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; import software.amazon.awssdk.utils.IoUtils; /** @@ -37,9 +35,9 @@ public class JsonContent { private static final Logger LOG = LoggerFactory.getLogger(JsonContent.class); private final byte[] rawContent; - private final SdkJsonNode jsonNode; + private final JsonNode jsonNode; - JsonContent(byte[] rawJsonContent, SdkJsonNode jsonNode) { + JsonContent(byte[] rawJsonContent, JsonNode jsonNode) { this.rawContent = rawJsonContent; this.jsonNode = jsonNode; } @@ -67,16 +65,16 @@ public static JsonContent createJsonContent(SdkHttpFullResponse httpResponse, return new JsonContent(rawJsonContent, jsonFactory); } - private static SdkJsonNode parseJsonContent(byte[] rawJsonContent, JsonFactory jsonFactory) { + private static JsonNode parseJsonContent(byte[] 
rawJsonContent, JsonFactory jsonFactory) { if (rawJsonContent == null || rawJsonContent.length == 0) { - return SdkObjectNode.emptyObject(); + return JsonNode.emptyObjectNode(); } try { - JsonDomParser parser = JsonDomParser.create(jsonFactory); - return parser.parse(new ByteArrayInputStream(rawJsonContent)); + JsonNodeParser parser = JsonNodeParser.builder().jsonFactory(jsonFactory).build(); + return parser.parse(rawJsonContent); } catch (Exception e) { LOG.debug("Unable to parse HTTP response content", e); - return SdkObjectNode.emptyObject(); + return JsonNode.emptyObjectNode(); } } @@ -84,7 +82,7 @@ public byte[] getRawContent() { return rawContent; } - public SdkJsonNode getJsonNode() { + public JsonNode getJsonNode() { return jsonNode; } } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/SdkJsonGenerator.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/SdkJsonGenerator.java index 0e91bfbd019e..228333bdf724 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/SdkJsonGenerator.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/SdkJsonGenerator.java @@ -15,8 +15,6 @@ package software.amazon.awssdk.protocols.json; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.math.BigDecimal; @@ -25,6 +23,8 @@ import java.time.Instant; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; +import software.amazon.awssdk.thirdparty.jackson.core.JsonGenerator; import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.DateUtils; diff --git 
a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonFactory.java index c88840d4d927..0e4c726e6445 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonFactory.java @@ -15,8 +15,8 @@ package software.amazon.awssdk.protocols.json; -import com.fasterxml.jackson.core.JsonFactory; import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; /** * Common interface for creating generators (writers) and protocol handlers for JSON like protocols. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/AwsStructuredPlainJsonFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/AwsStructuredPlainJsonFactory.java index 14d97b854627..fcdb6d705237 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/AwsStructuredPlainJsonFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/AwsStructuredPlainJsonFactory.java @@ -15,11 +15,12 @@ package software.amazon.awssdk.protocols.json.internal; -import com.fasterxml.jackson.core.JsonFactory; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.protocols.json.BaseAwsStructuredJsonFactory; import software.amazon.awssdk.protocols.json.SdkJsonGenerator; import software.amazon.awssdk.protocols.json.StructuredJsonGenerator; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; /** * Creates generators and protocol 
handlers for plain text JSON wire format. @@ -42,7 +43,7 @@ protected StructuredJsonGenerator createWriter(JsonFactory jsonFactory, @Override public JsonFactory getJsonFactory() { - return JSON_FACTORY; + return JsonNodeParser.DEFAULT_JSON_FACTORY; } }; diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParser.java deleted file mode 100644 index ab42d8a765be..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParser.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.json.internal.dom; - -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; -import java.io.IOException; -import java.io.InputStream; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.exception.SdkClientException; - -/** - * Parses an JSON document into a simple DOM like structure, {@link SdkJsonNode}. 
- */ -@SdkInternalApi -public final class JsonDomParser { - - private final JsonFactory jsonFactory; - - private JsonDomParser(JsonFactory jsonFactory) { - this.jsonFactory = jsonFactory; - } - - public SdkJsonNode parse(InputStream content) throws IOException { - try (JsonParser parser = jsonFactory.createParser(content) - .configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false)) { - return parseToken(parser, parser.nextToken()); - } - } - - private SdkJsonNode parseToken(JsonParser parser, JsonToken token) throws IOException { - if (token == null) { - return null; - } - switch (token) { - case VALUE_EMBEDDED_OBJECT: - return SdkEmbeddedObject.create(parser.getEmbeddedObject()); - case VALUE_STRING: - return SdkScalarNode.create(parser.getText()); - case VALUE_FALSE: - return SdkScalarNode.createBoolean(false); - case VALUE_TRUE: - return SdkScalarNode.createBoolean(true); - case VALUE_NULL: - return SdkNullNode.instance(); - case VALUE_NUMBER_FLOAT: - case VALUE_NUMBER_INT: - return SdkScalarNode.createNumber(parser.getNumberValue()); - case START_OBJECT: - return parseObject(parser); - case START_ARRAY: - return parseArray(parser); - default: - throw SdkClientException.create("Unexpected JSON token - " + token); - } - } - - private SdkJsonNode parseObject(JsonParser parser) throws IOException { - JsonToken currentToken = parser.nextToken(); - SdkObjectNode.Builder builder = SdkObjectNode.builder(); - while (currentToken != JsonToken.END_OBJECT) { - String fieldName = parser.getText(); - builder.putField(fieldName, parseToken(parser, parser.nextToken())); - currentToken = parser.nextToken(); - } - return builder.build(); - } - - private SdkJsonNode parseArray(JsonParser parser) throws IOException { - JsonToken currentToken = parser.nextToken(); - SdkArrayNode.Builder builder = SdkArrayNode.builder(); - while (currentToken != JsonToken.END_ARRAY) { - builder.addItem(parseToken(parser, currentToken)); - currentToken = parser.nextToken(); - } - return 
builder.build(); - } - - public static JsonDomParser create(JsonFactory jsonFactory) { - return new JsonDomParser(jsonFactory); - } -} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkArrayNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkArrayNode.java deleted file mode 100644 index 2f2521946205..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkArrayNode.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.json.internal.dom; - -import static java.util.Collections.unmodifiableList; - -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; -import software.amazon.awssdk.annotations.SdkInternalApi; - -/** - * Represents a JSON array. 
- */ -@SdkInternalApi -public final class SdkArrayNode implements SdkJsonNode { - - private final List items; - - private SdkArrayNode(Builder builder) { - this.items = unmodifiableList(new ArrayList<>(builder.items)); - } - - @Override - public List items() { - return items; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - SdkArrayNode that = (SdkArrayNode) o; - return Objects.equals(items, that.items); - } - - @Override - public int hashCode() { - return Objects.hashCode(items); - } - - @Override - public String toString() { - return items.stream() - .map(Object::toString) - .collect(Collectors.joining(",", "[", "]")); - } - - static Builder builder() { - return new Builder(); - } - - static final class Builder { - - private final List items = new ArrayList<>(); - - private Builder() { - } - - Builder addItem(SdkJsonNode item) { - this.items.add(item); - return this; - } - - SdkArrayNode build() { - return new SdkArrayNode(this); - } - } -} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkEmbeddedObject.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkEmbeddedObject.java deleted file mode 100644 index 1d2d413df357..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkEmbeddedObject.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.json.internal.dom; - -import com.fasterxml.jackson.core.JsonParser; -import java.nio.ByteBuffer; -import java.util.Date; -import java.util.Objects; -import software.amazon.awssdk.annotations.SdkInternalApi; - -/** - * Represents an embedded object returned by a {@link JsonParser}. This is used for the ION - * format which embeds {@link Date} and {@link ByteBuffer} objects. - */ -@SdkInternalApi -public final class SdkEmbeddedObject implements SdkJsonNode { - - private final Object embeddedObject; - - private SdkEmbeddedObject(Object embeddedObject) { - this.embeddedObject = embeddedObject; - } - - /** - * @return The embedded object that was returned by the {@link JsonParser}. - */ - @Override - public Object embeddedObject() { - return embeddedObject; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - SdkEmbeddedObject that = (SdkEmbeddedObject) o; - return Objects.equals(embeddedObject, that.embeddedObject); - } - - @Override - public int hashCode() { - return Objects.hashCode(embeddedObject); - } - - static SdkEmbeddedObject create(Object embeddedObject) { - return new SdkEmbeddedObject(embeddedObject); - } -} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkJsonNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkJsonNode.java deleted file mode 100644 index 83a93e7370b5..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkJsonNode.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.json.internal.dom; - -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.SdkInternalApi; - -/** - * DOM interface for reading a JSON document. - */ -@SdkInternalApi -public interface SdkJsonNode { - - /** - * @return True if the node represents an explicit JSON null, false otherwise. - */ - default boolean isNull() { - return false; - } - - /** - * @return The value of the node as text. Returns null for most nodes except for {@link SdkScalarNode}. - */ - default String asText() { - return null; - } - - /** - * @return The embedded object value of the node. See {@link SdkEmbeddedObject}. - */ - default Object embeddedObject() { - return null; - } - - /** - * @param fieldName Field to get value for. - * @return Value of field in the JSON object if this node represents an object, otherwise returns null. - */ - default SdkJsonNode get(String fieldName) { - return null; - } - - /** - * @return If this node represents a JSON array, then this returns the list of items in that array. Otherwise returns null. - */ - default List items() { - return null; - } - - /** - * @return If this node represents a JSON object, then this returns the map of field names to field values in that - * object. Otherwise returns null. 
- */ - default Map fields() { - return null; - } -} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkNullNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkNullNode.java deleted file mode 100644 index 3d8739cd4345..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkNullNode.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.json.internal.dom; - -import software.amazon.awssdk.annotations.SdkInternalApi; - -/** - * Represents an explicit JSON null. 
- */ -@SdkInternalApi -public final class SdkNullNode implements SdkJsonNode { - - private static final SdkNullNode INSTANCE = new SdkNullNode(); - - private SdkNullNode() { - } - - @Override - public boolean isNull() { - return true; - } - - @Override - public boolean equals(Object obj) { - return obj instanceof SdkNullNode; - } - - @Override - public int hashCode() { - return 0; - } - - @Override - public String toString() { - return "null"; - } - - static SdkNullNode instance() { - return INSTANCE; - } -} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkObjectNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkObjectNode.java deleted file mode 100644 index 59ab40887a02..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkObjectNode.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.json.internal.dom; - -import static java.util.Collections.unmodifiableMap; - -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; -import software.amazon.awssdk.annotations.SdkInternalApi; - -@SdkInternalApi -public final class SdkObjectNode implements SdkJsonNode { - - private static final SdkObjectNode EMPTY = SdkObjectNode.builder().build(); - - private final Map fields; - - private SdkObjectNode(Builder builder) { - this.fields = unmodifiableMap(new HashMap<>(builder.fields)); - } - - @Override - public SdkJsonNode get(String fieldName) { - return fields.get(fieldName); - } - - @Override - public Map fields() { - return fields; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - SdkObjectNode that = (SdkObjectNode) o; - return Objects.equals(fields, that.fields); - } - - @Override - public int hashCode() { - return Objects.hashCode(fields); - } - - @Override - public String toString() { - return fields.entrySet().stream() - .map(e -> String.format("\"%s\": %s", e.getKey(), e.getValue())) - .collect(Collectors.joining(",\n", "{\n", "\n}")); - } - - static Builder builder() { - return new Builder(); - } - - /** - * @return An empty JSON object. 
- */ - public static SdkObjectNode emptyObject() { - return EMPTY; - } - - static final class Builder { - - private final Map fields = new HashMap<>(); - - private Builder() { - } - - Builder putField(String fieldName, SdkJsonNode value) { - fields.put(fieldName, value); - return this; - } - - SdkObjectNode build() { - return new SdkObjectNode(this); - } - } -} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNode.java deleted file mode 100644 index f59172b39b93..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNode.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.json.internal.dom; - -import java.util.Objects; -import software.amazon.awssdk.annotations.SdkInternalApi; - -/** - * Represents a simple scalar JSON value. This can either be a JSON string, JSON number, or JSON boolean. All values - * are coerced into a string value {@link #value()}. 
- */ -@SdkInternalApi -public final class SdkScalarNode implements SdkJsonNode { - - private final String value; - private final SdkScalarNodeType nodeType; - - - private SdkScalarNode(String value, SdkScalarNodeType nodeType) { - this.value = value; - this.nodeType = nodeType; - } - - public SdkScalarNodeType getNodeType() { - return nodeType; - } - - public String value() { - return value; - } - - @Override - public String asText() { - return value; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - SdkScalarNode that = (SdkScalarNode) o; - return Objects.equals(value, that.value); - } - - @Override - public int hashCode() { - return Objects.hashCode(value); - } - - static SdkScalarNode create(String value) { - return new SdkScalarNode(value, SdkScalarNodeType.STRING); - } - - static SdkScalarNode createNumber(Number value) { - return new SdkScalarNode(String.valueOf(value), SdkScalarNodeType.NUMBER); - } - - static SdkScalarNode createBoolean(boolean value) { - return new SdkScalarNode(String.valueOf(value), SdkScalarNodeType.BOOLEAN); - } - - /** - * This does not preserve the type of the original node. For example a JSON number will be printed out - * as a JSON string here. As such this should be used for debugging and tests only. - */ - @Override - public String toString() { - return "\"" + value + "\""; - } - -} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNodeType.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNodeType.java deleted file mode 100644 index 2446f31ebe8c..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNodeType.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.protocols.json.internal.dom; - -public enum SdkScalarNodeType { - - /** - * {{@link Number}} type {{@link SdkScalarNode}}. - */ - NUMBER, - - /** - * {{@link Boolean}} type {{@link SdkScalarNode}}. - - */ - BOOLEAN, - - /** - * {{@link String}} type {{@link SdkScalarNode}}. - - */ - STRING -} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/HeaderMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/HeaderMarshaller.java index 8d7fac6d7d21..69d2e75ea87b 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/HeaderMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/HeaderMarshaller.java @@ -15,11 +15,16 @@ package software.amazon.awssdk.protocols.json.internal.marshall; +import static software.amazon.awssdk.utils.CollectionUtils.isNullOrEmpty; + import java.nio.charset.StandardCharsets; import java.time.Instant; +import java.util.List; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.protocol.MarshallLocation; import software.amazon.awssdk.core.traits.JsonValueTrait; +import software.amazon.awssdk.core.traits.ListTrait; import 
software.amazon.awssdk.protocols.core.ValueToStringConverter; import software.amazon.awssdk.utils.BinaryUtils; @@ -45,6 +50,19 @@ public final class HeaderMarshaller { public static final JsonMarshaller INSTANT = new SimpleHeaderMarshaller<>(JsonProtocolMarshaller.INSTANT_VALUE_TO_STRING); + public static final JsonMarshaller> LIST = (list, context, paramName, sdkField) -> { + // Null or empty lists cannot be meaningfully (or safely) represented in an HTTP header message since header-fields must + // typically have a non-empty field-value. https://datatracker.ietf.org/doc/html/rfc7230#section-3.2 + if (isNullOrEmpty(list)) { + return; + } + SdkField memberFieldInfo = sdkField.getRequiredTrait(ListTrait.class).memberFieldInfo(); + for (Object listValue : list) { + JsonMarshaller marshaller = context.marshallerRegistry().getMarshaller(MarshallLocation.HEADER, listValue); + marshaller.marshall(listValue, context, paramName, memberFieldInfo); + } + }; + private HeaderMarshaller() { } @@ -58,8 +76,7 @@ private SimpleHeaderMarshaller(ValueToStringConverter.ValueToString converter @Override public void marshall(T val, JsonMarshallerContext context, String paramName, SdkField sdkField) { - context.request().putHeader(paramName, converter.convert(val, sdkField)); + context.request().appendHeader(paramName, converter.convert(val, sdkField)); } } - } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java index 311dbb3b152f..6d3c39bd3bc5 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java @@ -110,6 +110,7 @@ private static 
JsonMarshallerRegistry createMarshallerRegistry() { .headerMarshaller(MarshallingType.FLOAT, HeaderMarshaller.FLOAT) .headerMarshaller(MarshallingType.BOOLEAN, HeaderMarshaller.BOOLEAN) .headerMarshaller(MarshallingType.INSTANT, HeaderMarshaller.INSTANT) + .headerMarshaller(MarshallingType.LIST, HeaderMarshaller.LIST) .headerMarshaller(MarshallingType.NULL, JsonMarshaller.NULL) .queryParamMarshaller(MarshallingType.STRING, QueryParamMarshaller.STRING) diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonErrorMessageParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonErrorMessageParser.java index 954504d7d25f..ca34e0e04d67 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonErrorMessageParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonErrorMessageParser.java @@ -17,7 +17,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; @SdkInternalApi public final class AwsJsonErrorMessageParser implements ErrorMessageParser { @@ -52,7 +52,7 @@ public AwsJsonErrorMessageParser(SdkJsonErrorMessageParser errorMessageJsonLocat * @return Error Code of exceptional response or null if it can't be determined */ @Override - public String parseErrorMessage(SdkHttpFullResponse httpResponse, SdkJsonNode jsonNode) { + public String parseErrorMessage(SdkHttpFullResponse httpResponse, JsonNode jsonNode) { String headerMessage = httpResponse.firstMatchingHeader(X_AMZN_ERROR_MESSAGE).orElse(null); if (headerMessage != null) { return headerMessage; diff --git 
a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java index ea832b762e8e..9772d5d465b3 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java @@ -15,7 +15,6 @@ package software.amazon.awssdk.protocols.json.internal.unmarshall; -import com.fasterxml.jackson.core.JsonFactory; import java.time.Duration; import java.util.List; import java.util.Map; @@ -33,6 +32,7 @@ import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.json.ErrorCodeParser; import software.amazon.awssdk.protocols.json.JsonContent; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; import software.amazon.awssdk.utils.http.SdkHttpUtils; /** diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/ErrorMessageParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/ErrorMessageParser.java index 9eab0e57cd36..92d579e72e98 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/ErrorMessageParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/ErrorMessageParser.java @@ -17,10 +17,10 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; +import 
software.amazon.awssdk.protocols.jsoncore.JsonNode; @SdkInternalApi public interface ErrorMessageParser { - String parseErrorMessage(SdkHttpFullResponse httpResponse, SdkJsonNode jsonNode); + String parseErrorMessage(SdkHttpFullResponse httpResponse, JsonNode jsonNode); } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/HeaderUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/HeaderUnmarshaller.java index cd088e2c6e7c..8a1f352f4086 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/HeaderUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/HeaderUnmarshaller.java @@ -15,14 +15,18 @@ package software.amazon.awssdk.protocols.json.internal.unmarshall; +import static java.util.stream.Collectors.toList; + import java.nio.charset.StandardCharsets; import java.time.Instant; +import java.util.List; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.traits.JsonValueTrait; import software.amazon.awssdk.protocols.core.StringToValueConverter; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.http.SdkHttpUtils; /** * Header unmarshallers for all the simple types we support. 
@@ -39,6 +43,11 @@ final class HeaderUnmarshaller { public static final JsonUnmarshaller BOOLEAN = new SimpleHeaderUnmarshaller<>(StringToValueConverter.TO_BOOLEAN); public static final JsonUnmarshaller FLOAT = new SimpleHeaderUnmarshaller<>(StringToValueConverter.TO_FLOAT); + // Only supports string value type + public static final JsonUnmarshaller> LIST = (context, jsonContent, field) -> { + return SdkHttpUtils.allMatchingHeaders(context.response().headers(), field.locationName()).collect(toList()); + }; + private HeaderUnmarshaller() { } @@ -78,7 +87,7 @@ private SimpleHeaderUnmarshaller(StringToValueConverter.StringToValue stringT @Override public T unmarshall(JsonUnmarshallerContext context, - SdkJsonNode jsonContent, + JsonNode jsonContent, SdkField field) { return context.response().firstMatchingHeader(field.locationName()) .map(s -> stringToValue.convert(s, field)) diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonErrorCodeParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonErrorCodeParser.java index dcaf190e3565..8bdffecadfaf 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonErrorCodeParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonErrorCodeParser.java @@ -25,7 +25,7 @@ import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.protocols.json.ErrorCodeParser; import software.amazon.awssdk.protocols.json.JsonContent; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; @SdkInternalApi public class JsonErrorCodeParser implements ErrorCodeParser { @@ -115,15 +115,15 @@ private String parseErrorCodeFromXAmzErrorType(String headerValue) { * "prefix#typeName" Examples : 
"AccessDeniedException", * "software.amazon.awssdk.dynamodb.v20111205#ProvisionedThroughputExceededException" */ - private String parseErrorCodeFromContents(SdkJsonNode jsonContents) { + private String parseErrorCodeFromContents(JsonNode jsonContents) { if (jsonContents == null) { return null; } - SdkJsonNode errorCodeField = jsonContents.get(errorCodeFieldName); + JsonNode errorCodeField = jsonContents.field(errorCodeFieldName).orElse(null); if (errorCodeField == null) { return null; } - String code = errorCodeField.asText(); + String code = errorCodeField.text(); int separator = code.lastIndexOf("#"); return code.substring(separator + 1); } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java index 18b1f8fe3673..4d21b26c4be6 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java @@ -30,7 +30,6 @@ import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.document.Document; -import software.amazon.awssdk.core.io.ReleasableInputStream; import software.amazon.awssdk.core.protocol.MarshallLocation; import software.amazon.awssdk.core.protocol.MarshallingType; import software.amazon.awssdk.core.traits.ListTrait; @@ -41,9 +40,9 @@ import software.amazon.awssdk.protocols.core.StringToInstant; import software.amazon.awssdk.protocols.core.StringToValueConverter; import software.amazon.awssdk.protocols.json.internal.MarshallerUtil; -import software.amazon.awssdk.protocols.json.internal.dom.JsonDomParser; -import 
software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; import software.amazon.awssdk.protocols.json.internal.unmarshall.document.DocumentUnmarshaller; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; import software.amazon.awssdk.utils.builder.Buildable; /** @@ -58,7 +57,7 @@ public final class JsonProtocolUnmarshaller { private final JsonUnmarshallerRegistry registry; - private final JsonDomParser parser; + private final JsonNodeParser parser; private JsonProtocolUnmarshaller(Builder builder) { this.parser = builder.parser; @@ -82,6 +81,7 @@ private static JsonUnmarshallerRegistry createUnmarshallerRegistry( .headerUnmarshaller(MarshallingType.BOOLEAN, HeaderUnmarshaller.BOOLEAN) .headerUnmarshaller(MarshallingType.INSTANT, HeaderUnmarshaller.createInstantHeaderUnmarshaller(instantStringToValue)) .headerUnmarshaller(MarshallingType.FLOAT, HeaderUnmarshaller.FLOAT) + .headerUnmarshaller(MarshallingType.LIST, HeaderUnmarshaller.LIST) .payloadUnmarshaller(MarshallingType.STRING, new SimpleTypeJsonUnmarshaller<>(StringToValueConverter.TO_STRING)) .payloadUnmarshaller(MarshallingType.INTEGER, new SimpleTypeJsonUnmarshaller<>(StringToValueConverter.TO_INTEGER)) @@ -102,21 +102,21 @@ private static JsonUnmarshallerRegistry createUnmarshallerRegistry( } private static SdkBytes unmarshallSdkBytes(JsonUnmarshallerContext context, - SdkJsonNode jsonContent, + JsonNode jsonContent, SdkField field) { if (jsonContent == null || jsonContent.isNull()) { return null; } // Binary protocols like CBOR may already have the raw bytes extracted. 
- if (jsonContent.embeddedObject() != null) { - return SdkBytes.fromByteArray((byte[]) jsonContent.embeddedObject()); + if (jsonContent.isEmbeddedObject()) { + return SdkBytes.fromByteArray((byte[]) jsonContent.asEmbeddedObject()); } else { // Otherwise decode the JSON string as Base64 - return TO_SDK_BYTES.convert(jsonContent.asText(), field); + return TO_SDK_BYTES.convert(jsonContent.text(), field); } } - private static SdkPojo unmarshallStructured(JsonUnmarshallerContext context, SdkJsonNode jsonContent, SdkField f) { + private static SdkPojo unmarshallStructured(JsonUnmarshallerContext context, JsonNode jsonContent, SdkField f) { if (jsonContent == null || jsonContent.isNull()) { return null; } else { @@ -125,35 +125,35 @@ private static SdkPojo unmarshallStructured(JsonUnmarshallerContext context, Sdk } private static Document unmarshallDocument(JsonUnmarshallerContext context, - SdkJsonNode jsonContent, + JsonNode jsonContent, SdkField field) { return jsonContent != null && !jsonContent.isNull() ? 
getDocumentFromJsonContent(jsonContent) : null; } - private static Document getDocumentFromJsonContent(SdkJsonNode jsonContent) { - return new DocumentUnmarshaller().visit(jsonContent); + private static Document getDocumentFromJsonContent(JsonNode jsonContent) { + return jsonContent.visit(new DocumentUnmarshaller()); } private static Map unmarshallMap(JsonUnmarshallerContext context, - SdkJsonNode jsonContent, + JsonNode jsonContent, SdkField> field) { if (jsonContent == null || jsonContent.isNull()) { return null; } SdkField valueInfo = field.getTrait(MapTrait.class).valueFieldInfo(); Map map = new HashMap<>(); - jsonContent.fields().forEach((fieldName, value) -> { + jsonContent.asObject().forEach((fieldName, value) -> { JsonUnmarshaller unmarshaller = context.getUnmarshaller(valueInfo.location(), valueInfo.marshallingType()); map.put(fieldName, unmarshaller.unmarshall(context, value, valueInfo)); }); return map; } - private static List unmarshallList(JsonUnmarshallerContext context, SdkJsonNode jsonContent, SdkField> field) { + private static List unmarshallList(JsonUnmarshallerContext context, JsonNode jsonContent, SdkField> field) { if (jsonContent == null || jsonContent.isNull()) { return null; } - return jsonContent.items() + return jsonContent.asArray() .stream() .map(item -> { SdkField memberInfo = field.getTrait(ListTrait.class).memberFieldInfo(); @@ -174,16 +174,16 @@ private SimpleTypeJsonUnmarshaller(StringToValueConverter.StringToValue strin @Override public T unmarshall(JsonUnmarshallerContext context, - SdkJsonNode jsonContent, + JsonNode jsonContent, SdkField field) { - return jsonContent != null && !jsonContent.isNull() ? stringToValue.convert(jsonContent.asText(), field) : null; + return jsonContent != null && !jsonContent.isNull() ? 
stringToValue.convert(jsonContent.text(), field) : null; } } public TypeT unmarshall(SdkPojo sdkPojo, SdkHttpFullResponse response) throws IOException { if (hasPayloadMembersOnUnmarshall(sdkPojo) && !hasExplicitBlobPayloadMember(sdkPojo) && response.content().isPresent()) { - SdkJsonNode jsonNode = parser.parse(ReleasableInputStream.wrap(response.content().get()).disableClose()); + JsonNode jsonNode = parser.parse(response.content().get()); return unmarshall(sdkPojo, response, jsonNode); } else { return unmarshall(sdkPojo, response, null); @@ -209,7 +209,7 @@ private boolean hasPayloadMembersOnUnmarshall(SdkPojo sdkPojo) { public TypeT unmarshall(SdkPojo sdkPojo, SdkHttpFullResponse response, - SdkJsonNode jsonContent) { + JsonNode jsonContent) { JsonUnmarshallerContext context = JsonUnmarshallerContext.builder() .unmarshallerRegistry(registry) .response(response) @@ -219,14 +219,14 @@ public TypeT unmarshall(SdkPojo sdkPojo, @SuppressWarnings("unchecked") private static TypeT unmarshallStructured(SdkPojo sdkPojo, - SdkJsonNode jsonContent, + JsonNode jsonContent, JsonUnmarshallerContext context) { for (SdkField field : sdkPojo.sdkFields()) { if (isExplicitPayloadMember(field) && field.marshallingType() == MarshallingType.SDK_BYTES && context.response().content().isPresent()) { field.set(sdkPojo, SdkBytes.fromInputStream(context.response().content().get())); } else { - SdkJsonNode jsonFieldContent = getSdkJsonNode(jsonContent, field); + JsonNode jsonFieldContent = getJsonNode(jsonContent, field); JsonUnmarshaller unmarshaller = context.getUnmarshaller(field.location(), field.marshallingType()); field.set(sdkPojo, unmarshaller.unmarshall(context, jsonFieldContent, (SdkField) field)); } @@ -234,11 +234,12 @@ private static TypeT unmarshallStructured(SdkPojo sdkPoj return (TypeT) ((Buildable) sdkPojo).build(); } - private static SdkJsonNode getSdkJsonNode(SdkJsonNode jsonContent, SdkField field) { + private static JsonNode getJsonNode(JsonNode jsonContent, SdkField 
field) { if (jsonContent == null) { return null; } - return isExplicitPayloadMember(field) ? jsonContent : jsonContent.get(field.locationName()); + return isExplicitPayloadMember(field) ? jsonContent : jsonContent.field(field.locationName()) + .orElse(null); } /** @@ -253,7 +254,7 @@ public static Builder builder() { */ public static final class Builder { - private JsonDomParser parser; + private JsonNodeParser parser; private Map defaultTimestampFormats; private Builder() { @@ -263,7 +264,7 @@ private Builder() { * @param parser JSON parser to use. * @return This builder for method chaining. */ - public Builder parser(JsonDomParser parser) { + public Builder parser(JsonNodeParser parser) { this.parser = parser; return this; } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshaller.java index b71e0adbbdac..fc6af9f6b4c8 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshaller.java @@ -17,7 +17,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.SdkField; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; /** * Interface for unmarshalling a field from a JSON based service. @@ -34,7 +34,7 @@ public interface JsonUnmarshaller { * @return Unmarshalled value. 
*/ T unmarshall(JsonUnmarshallerContext context, - SdkJsonNode jsonContent, + JsonNode jsonContent, SdkField field); } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/SdkJsonErrorMessageParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/SdkJsonErrorMessageParser.java index 02fac7fb163b..9d10f40bea2a 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/SdkJsonErrorMessageParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/SdkJsonErrorMessageParser.java @@ -20,7 +20,7 @@ import java.util.List; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; @SdkInternalApi public class SdkJsonErrorMessageParser implements ErrorMessageParser { @@ -50,11 +50,11 @@ private SdkJsonErrorMessageParser(List errorMessageJsonLocations) { * @return Error Code of exceptional response or null if it can't be determined */ @Override - public String parseErrorMessage(SdkHttpFullResponse httpResponse, SdkJsonNode jsonNode) { + public String parseErrorMessage(SdkHttpFullResponse httpResponse, JsonNode jsonNode) { for (String field : errorMessageJsonLocations) { - SdkJsonNode value = jsonNode.get(field); + String value = jsonNode.field(field).map(JsonNode::text).orElse(null); if (value != null) { - return value.asText(); + return value; } } return null; diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/document/DocumentUnmarshaller.java 
b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/document/DocumentUnmarshaller.java index a9f8714cb7f4..be9e0afa4794 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/document/DocumentUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/document/DocumentUnmarshaller.java @@ -16,64 +16,54 @@ package software.amazon.awssdk.protocols.json.internal.unmarshall.document; import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.SdkNumber; import software.amazon.awssdk.core.document.Document; -import software.amazon.awssdk.protocols.json.internal.dom.SdkArrayNode; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; -import software.amazon.awssdk.protocols.json.internal.dom.SdkNullNode; -import software.amazon.awssdk.protocols.json.internal.dom.SdkObjectNode; -import software.amazon.awssdk.protocols.json.internal.dom.SdkScalarNode; -import software.amazon.awssdk.protocols.json.internal.visitor.SdkJsonNodeVisitor; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeVisitor; @SdkInternalApi -public class DocumentUnmarshaller implements SdkJsonNodeVisitor { - - private Document visitMap(SdkJsonNode jsonContent) { - return Document.fromMap(jsonContent.fields().entrySet() - .stream().collect(Collectors.toMap(entry -> entry.getKey(), entry -> visit(entry.getValue()), - (left, right) -> left, - LinkedHashMap::new))); +public class DocumentUnmarshaller implements JsonNodeVisitor { + @Override + public Document visitNull() { + return Document.fromNull(); } - private Document visitList(SdkJsonNode jsonContent) { - return Document.fromList( - 
((SdkArrayNode) jsonContent).items().stream() - .map(item -> visit(item)).collect(Collectors.toList())); + @Override + public Document visitBoolean(boolean bool) { + return Document.fromBoolean(bool); } - private Document visitScalar(SdkJsonNode jsonContent) { - SdkScalarNode sdkScalarNode = (SdkScalarNode) jsonContent; - - switch (sdkScalarNode.getNodeType()) { - case BOOLEAN: - return Document.fromBoolean(Boolean.valueOf(sdkScalarNode.asText())); - case NUMBER: - return Document.fromNumber(SdkNumber.fromString(jsonContent.asText())); - default: - return Document.fromString(sdkScalarNode.asText()); - } + @Override + public Document visitNumber(String number) { + return Document.fromNumber(number); } @Override - public Document visit(SdkJsonNode sdkJsonNode) { + public Document visitString(String string) { + return Document.fromString(string); + } - if (sdkJsonNode instanceof SdkScalarNode) { - return visitScalar(sdkJsonNode); - } else if (sdkJsonNode instanceof SdkObjectNode) { - return visitMap(sdkJsonNode); - } else if (sdkJsonNode instanceof SdkArrayNode) { - return visitList(sdkJsonNode); - } else if (sdkJsonNode instanceof SdkNullNode) { - return visitNull(); - } else { - throw new IllegalStateException("Visitor not defined for " + sdkJsonNode); - } + @Override + public Document visitArray(List array) { + return Document.fromList(array.stream() + .map(node -> node.visit(this)) + .collect(Collectors.toList())); } - private Document visitNull() { - return Document.fromNull(); + @Override + public Document visitObject(Map object) { + return Document.fromMap(object.entrySet() + .stream().collect(Collectors.toMap(entry -> entry.getKey(), + entry -> entry.getValue().visit(this), + (left, right) -> left, + LinkedHashMap::new))); } + @Override + public Document visitEmbeddedObject(Object embeddedObject) { + throw new UnsupportedOperationException("Embedded objects are not supported within Document types."); + } } diff --git 
a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/visitor/SdkJsonNodeVisitor.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/visitor/SdkJsonNodeVisitor.java deleted file mode 100644 index 77069137fd97..000000000000 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/visitor/SdkJsonNodeVisitor.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.json.internal.visitor; - -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; - -@SdkInternalApi -public interface SdkJsonNodeVisitor { - R visit(SdkJsonNode sdkJsonNode); -} \ No newline at end of file diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/AwsJsonErrorMessageParserTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/AwsJsonErrorMessageParserTest.java index 073ba292b3ca..54e2a89c591e 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/AwsJsonErrorMessageParserTest.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/AwsJsonErrorMessageParserTest.java @@ -18,17 +18,14 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -import com.fasterxml.jackson.core.JsonFactory; -import java.io.IOException; -import java.io.UncheckedIOException; import java.util.UUID; import org.junit.Before; import org.junit.Test; import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.json.internal.dom.JsonDomParser; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; import software.amazon.awssdk.protocols.json.internal.unmarshall.AwsJsonErrorMessageParser; import software.amazon.awssdk.protocols.json.internal.unmarshall.ErrorMessageParser; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; import software.amazon.awssdk.utils.StringInputStream; public class AwsJsonErrorMessageParserTest { @@ -41,47 +38,39 @@ public class AwsJsonErrorMessageParserTest { private SdkHttpFullResponse.Builder responseBuilder; - private JsonDomParser jsonParser; + private JsonNodeParser jsonParser; @Before public void setup() { - 
jsonParser = JsonDomParser.create(new JsonFactory()); + jsonParser = JsonNode.parser(); responseBuilder = ValidSdkObjects.sdkHttpFullResponse(); } @Test public void testErrorMessageAt_message() { - SdkJsonNode jsonNode = parseJson("message", MESSAGE_CONTENT); + JsonNode jsonNode = parseJson("message", MESSAGE_CONTENT); String parsed = parser.parseErrorMessage(responseBuilder.build(), jsonNode); assertEquals(MESSAGE_CONTENT, parsed); } - private SdkJsonNode parseJson(String fieldName, String value) { - try { - return jsonParser.parse(new StringInputStream(String.format("{\"%s\": \"%s\"}", fieldName, value))); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + private JsonNode parseJson(String fieldName, String value) { + return jsonParser.parse(new StringInputStream(String.format("{\"%s\": \"%s\"}", fieldName, value))); } - private SdkJsonNode parseJson(String json) { - try { - return jsonParser.parse(new StringInputStream(json)); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + private JsonNode parseJson(String json) { + return jsonParser.parse(new StringInputStream(json)); } @Test public void testErrorMessageAt_Message() { - SdkJsonNode jsonNode = parseJson("Message", MESSAGE_CONTENT); + JsonNode jsonNode = parseJson("Message", MESSAGE_CONTENT); String parsed = parser.parseErrorMessage(responseBuilder.build(), jsonNode); assertEquals(MESSAGE_CONTENT, parsed); } @Test public void testErrorMessageAt_errorMessage() { - SdkJsonNode jsonNode = parseJson("errorMessage", MESSAGE_CONTENT); + JsonNode jsonNode = parseJson("errorMessage", MESSAGE_CONTENT); String parsed = parser.parseErrorMessage(responseBuilder.build(), jsonNode); assertEquals(MESSAGE_CONTENT, parsed); } @@ -94,14 +83,14 @@ public void testNoErrorMessage_ReturnsNull() { @Test public void testErrorMessageIsNumber_ReturnsStringValue() { - SdkJsonNode jsonNode = parseJson("{\"message\": 1}"); + JsonNode jsonNode = parseJson("{\"message\": 1}"); String parsed = 
parser.parseErrorMessage(responseBuilder.build(), jsonNode); assertEquals("1", parsed); } @Test public void testErrorMessageIsObject_ReturnsNull() { - SdkJsonNode jsonNode = parseJson("{\"message\": {\"foo\": \"bar\"}}"); + JsonNode jsonNode = parseJson("{\"message\": {\"foo\": \"bar\"}}"); String parsed = parser.parseErrorMessage(responseBuilder.build(), jsonNode); assertNull(parsed); } @@ -137,7 +126,7 @@ public void errorMessageInHeader_ReturnsHeaderValue_CaseInsensitive() { @Test public void errorMessageInHeader_TakesPrecedenceOverMessageInBody() { responseBuilder.putHeader(X_AMZN_ERROR_MESSAGE, MESSAGE_CONTENT); - SdkJsonNode jsonNode = parseJson("message", "other message in body"); + JsonNode jsonNode = parseJson("message", "other message in body"); String parsed = parser.parseErrorMessage(responseBuilder.build(), jsonNode); assertEquals(MESSAGE_CONTENT, parsed); } diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/JsonErrorCodeParserTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/JsonErrorCodeParserTest.java index 9c4b94faee02..bc814da1d1ad 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/JsonErrorCodeParserTest.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/JsonErrorCodeParserTest.java @@ -18,14 +18,11 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -import com.fasterxml.jackson.core.JsonFactory; import java.io.IOException; import org.junit.Test; import software.amazon.awssdk.http.SdkHttpFullResponse; -import software.amazon.awssdk.protocols.json.internal.dom.JsonDomParser; -import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; -import software.amazon.awssdk.protocols.json.internal.dom.SdkObjectNode; import software.amazon.awssdk.protocols.json.internal.unmarshall.JsonErrorCodeParser; +import 
software.amazon.awssdk.protocols.jsoncore.JsonNode; import software.amazon.awssdk.utils.StringInputStream; public class JsonErrorCodeParserTest { @@ -45,7 +42,7 @@ public class JsonErrorCodeParserTest { private final JsonErrorCodeParser parser = new JsonErrorCodeParser(ERROR_FIELD_NAME); private static JsonContent toJsonContent(String errorType) throws IOException { - SdkJsonNode node = JsonDomParser.create(new JsonFactory()).parse(new StringInputStream( + JsonNode node = JsonNode.parser().parse(new StringInputStream( String.format("{\"%s\": \"%s\"}", ERROR_FIELD_NAME, errorType))); return new JsonContent(null, node); } @@ -103,6 +100,6 @@ public void parseErrorType_NotPresentInHeadersAndNullContent_ReturnsNull() { @Test public void parseErrorType_NotPresentInHeadersAndEmptyContent_ReturnsNull() { assertNull(parser.parseErrorCode(httpResponseWithoutHeaders(), - new JsonContent(null, SdkObjectNode.emptyObject()))); + new JsonContent(null, JsonNode.emptyObjectNode()))); } } diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/SdkJsonGeneratorTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/SdkJsonGeneratorTest.java index 7a81210c06fe..59058d76f251 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/SdkJsonGeneratorTest.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/SdkJsonGeneratorTest.java @@ -19,21 +19,18 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.time.Instant; import org.junit.Before; import org.junit.Test; +import 
software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.thirdparty.jackson.core.JsonFactory; import software.amazon.awssdk.utils.BinaryUtils; public class SdkJsonGeneratorTest { - - private static final ObjectMapper MAPPER = new ObjectMapper(); - /** * Delta for comparing double values */ @@ -43,7 +40,7 @@ public class SdkJsonGeneratorTest { @Before public void setup() { - jsonGenerator = new SdkJsonGenerator(new JsonFactory(), "application/json"); + jsonGenerator = new SdkJsonGenerator(JsonFactory.builder().build(), "application/json"); } @Test @@ -56,10 +53,10 @@ public void simpleObject_AllPrimitiveTypes() throws IOException { jsonGenerator.writeEndObject(); JsonNode node = toJsonNode(); assertTrue(node.isObject()); - assertEquals("stringVal", node.get("stringProp").textValue()); - assertEquals(42, node.get("integralProp").longValue()); - assertEquals(true, node.get("booleanProp").booleanValue()); - assertEquals(123.456, node.get("doubleProp").doubleValue(), DELTA); + assertEquals("stringVal", node.asObject().get("stringProp").text()); + assertEquals("42", node.asObject().get("integralProp").asNumber()); + assertEquals(true, node.asObject().get("booleanProp").asBoolean()); + assertEquals(123.456, Double.parseDouble(node.asObject().get("doubleProp").asNumber()), DELTA); } @Test @@ -68,7 +65,7 @@ public void simpleObject_WithLongProperty_PreservesLongValue() throws IOExceptio jsonGenerator.writeFieldName("longProp").writeValue(Long.MAX_VALUE); jsonGenerator.writeEndObject(); JsonNode node = toJsonNode(); - assertEquals(Long.MAX_VALUE, node.get("longProp").longValue()); + assertEquals(Long.toString(Long.MAX_VALUE), node.asObject().get("longProp").asNumber()); } @Test @@ -78,7 +75,7 @@ public void simpleObject_WithBinaryData_WritesAsBase64() throws IOException { jsonGenerator.writeFieldName("binaryProp").writeValue(ByteBuffer.wrap(data)); jsonGenerator.writeEndObject(); JsonNode node = toJsonNode(); - 
assertEquals(BinaryUtils.toBase64(data), node.get("binaryProp").textValue()); + assertEquals(BinaryUtils.toBase64(data), node.asObject().get("binaryProp").text()); } @Test @@ -88,7 +85,7 @@ public void simpleObject_WithServiceDate() throws IOException { jsonGenerator.writeFieldName("dateProp").writeValue(instant); jsonGenerator.writeEndObject(); JsonNode node = toJsonNode(); - assertEquals(123.456, node.get("dateProp").doubleValue(), DELTA); + assertEquals(123.456, Double.parseDouble(node.asObject().get("dateProp").asNumber()), DELTA); } @Test @@ -100,9 +97,9 @@ public void stringArray() throws IOException { jsonGenerator.writeEndArray(); JsonNode node = toJsonNode(); assertTrue(node.isArray()); - assertEquals("valOne", node.get(0).textValue()); - assertEquals("valTwo", node.get(1).textValue()); - assertEquals("valThree", node.get(2).textValue()); + assertEquals("valOne", node.asArray().get(0).text()); + assertEquals("valTwo", node.asArray().get(1).text()); + assertEquals("valThree", node.asArray().get(2).text()); } @Test @@ -113,7 +110,7 @@ public void complexArray() throws IOException { jsonGenerator.writeEndObject(); jsonGenerator.writeEndArray(); JsonNode node = toJsonNode(); - assertEquals("nestedVal", node.get(0).get("nestedProp").textValue()); + assertEquals("nestedVal", node.asArray().get(0).asObject().get("nestedProp").text()); } @Test @@ -132,7 +129,7 @@ public void unclosedArray_AutoClosesOnClose() throws IOException { jsonGenerator.writeValue("valThree"); JsonNode node = toJsonNode(); assertTrue(node.isArray()); - assertEquals(3, node.size()); + assertEquals(3, node.asArray().size()); } // See https://forums.aws.amazon.com/thread.jspa?threadID=158756 @@ -175,7 +172,7 @@ public void testNumericNoQuote() { } private JsonNode toJsonNode() throws IOException { - return MAPPER.readTree(jsonGenerator.getBytes()); + return JsonNode.parser().parse(new ByteArrayInputStream(jsonGenerator.getBytes())); } } diff --git 
a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/DocumentUnmarshallerTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/DocumentUnmarshallerTest.java index 5d597529716b..cbd57689cf90 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/DocumentUnmarshallerTest.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/DocumentUnmarshallerTest.java @@ -15,63 +15,50 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.internal.EmbeddedObjectJsonNode; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.hamcrest.MatcherAssert.assertThat; public class DocumentUnmarshallerTest { - @Test public void testDocumentFromNumberNode() throws ParseException { - - SdkJsonNode sdkJsonNode = SdkScalarNode.createNumber(100); - assertThat( Document.fromNumber(SdkNumber.fromString("100"))) - .isEqualTo(new DocumentUnmarshaller().visit(sdkJsonNode)); - - SdkJsonNode sdkJsonNodeInt = SdkScalarNode.createNumber(100); - assertThat( Document.fromNumber(SdkNumber.fromInteger(100)).asNumber().intValue()) - .isEqualTo(new DocumentUnmarshaller().visit(sdkJsonNodeInt).asNumber().intValue()); - + JsonNode node = JsonNode.parser().parse("100"); + assertThat(Document.fromNumber(SdkNumber.fromInteger(100)).asNumber().intValue()) + .isEqualTo(node.visit(new DocumentUnmarshaller()).asNumber().intValue()); } - @Test public void testDocumentFromBoolean() { - - SdkJsonNode sdkScalarNode = SdkScalarNode.createBoolean(true); - - assertThat( Document.fromBoolean(true)) - .isEqualTo(new DocumentUnmarshaller().visit(sdkScalarNode)); - + JsonNode node = JsonNode.parser().parse("true"); + 
assertThat(Document.fromBoolean(true)).isEqualTo(node.visit(new DocumentUnmarshaller())); } @Test public void testDocumentFromString() { - SdkJsonNode sdkScalarNode = SdkScalarNode.create("100.00"); - assertThat( Document.fromString("100.00")) - .isEqualTo(new DocumentUnmarshaller().visit(sdkScalarNode)); + JsonNode node = JsonNode.parser().parse("\"100.00\""); + assertThat(Document.fromString("100.00")).isEqualTo(node.visit(new DocumentUnmarshaller())); } @Test public void testDocumentFromNull() { - assertThat( Document.fromNull()) - .isEqualTo(new DocumentUnmarshaller().visit(SdkNullNode.instance())); + JsonNode node = JsonNode.parser().parse("null"); + assertThat(Document.fromNull()).isEqualTo(node.visit(new DocumentUnmarshaller())); } - @Test public void testExceptionIsThrownFromEmbededObjectType() { - assertThatExceptionOfType(IllegalStateException.class) - .isThrownBy(() -> new DocumentUnmarshaller().visit(SdkEmbeddedObject.create(new HashMap<>())) ); + assertThatExceptionOfType(UnsupportedOperationException.class) + .isThrownBy(() -> new EmbeddedObjectJsonNode(new Object()).visit(new DocumentUnmarshaller())); } - @Test public void testDocumentFromObjectNode(){ - final SdkJsonNode sdkObjectNode = SdkObjectNode.builder().putField("firstKey", SdkScalarNode.create("firstValue")) - .putField("secondKey", SdkScalarNode.create("secondValue")).build(); - final Document documentMap = new DocumentUnmarshaller().visit(sdkObjectNode); + JsonNode node = JsonNode.parser().parse("{\"firstKey\": \"firstValue\", \"secondKey\": \"secondValue\"}"); + + Document documentMap = node.visit(new DocumentUnmarshaller()); Map expectedMap = new LinkedHashMap<>(); expectedMap.put("firstKey", Document.fromString("firstValue")); expectedMap.put("secondKey", Document.fromString("secondValue")); @@ -79,20 +66,16 @@ public void testDocumentFromObjectNode(){ assertThat(documentMap).isEqualTo(expectedDocumentMap); } - @Test public void testDocumentFromArrayNode(){ - final SdkArrayNode 
sdkArrayNode = SdkArrayNode.builder().addItem(SdkScalarNode.create("One")).addItem(SdkScalarNode.createNumber(10)) - .addItem(SdkScalarNode.createBoolean(true)).addItem(SdkNullNode.instance()).build(); + JsonNode node = JsonNode.parser().parse("[\"One\", 10, true, null]"); List documentList = new ArrayList<>(); documentList.add(Document.fromString("One")); documentList.add(Document.fromNumber(SdkNumber.fromBigDecimal(BigDecimal.TEN))); documentList.add(Document.fromBoolean(true)); documentList.add(Document.fromNull()); final Document document = Document.fromList(documentList); - final Document actualDocument = new DocumentUnmarshaller().visit(sdkArrayNode); + final Document actualDocument = node.visit(new DocumentUnmarshaller()); assertThat(actualDocument).isEqualTo(document); - } - } diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParserTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParserTest.java deleted file mode 100644 index ab8ae765a369..000000000000 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParserTest.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocols.json.internal.dom; - - -import static org.assertj.core.api.Assertions.assertThat; - -import com.fasterxml.jackson.core.JsonFactory; -import java.io.IOException; -import java.util.Arrays; -import org.junit.Before; -import org.junit.Test; -import software.amazon.awssdk.utils.StringInputStream; - -public class JsonDomParserTest { - - private JsonDomParser parser; - - @Before - public void setup() { - parser = JsonDomParser.create(new JsonFactory()); - } - - @Test - public void simpleString_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("\"foo\""); - assertThat(node) - .isInstanceOf(SdkScalarNode.class) - .matches(s -> ((SdkScalarNode) s).value().equals("foo")); - } - - @Test - public void simpleNumber_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("42"); - assertThat(node) - .isInstanceOf(SdkScalarNode.class) - .matches(s -> ((SdkScalarNode) s).value().equals("42")); - } - - @Test - public void decimalNumber_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("1234.56"); - assertThat(node) - .isInstanceOf(SdkScalarNode.class) - .matches(s -> ((SdkScalarNode) s).value().equals("1234.56")); - } - - @Test - public void falseBoolean_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("false"); - assertThat(node) - .isInstanceOf(SdkScalarNode.class) - .matches(s -> ((SdkScalarNode) s).value().equals("false")); - } - - @Test - public void trueBoolean_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("true"); - assertThat(node) - .isInstanceOf(SdkScalarNode.class) - .matches(s -> ((SdkScalarNode) s).value().equals("true")); - } - - @Test - public void jsonNull_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("null"); - assertThat(node).isInstanceOf(SdkNullNode.class); - } - - @Test - public void emptyObject_ParsedCorrecty() throws IOException { - SdkJsonNode node = parse("{}"); - SdkObjectNode expected = 
SdkObjectNode.builder().build(); - assertThat(node).isInstanceOf(SdkObjectNode.class) - .isEqualTo(expected); - } - - @Test - public void simpleObjectOfScalars_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("{" - + " \"stringMember\": \"foo\"," - + " \"integerMember\": 42," - + " \"floatMember\": 1234.56," - + " \"booleanMember\": true," - + " \"nullMember\": null" - + "}"); - - SdkObjectNode expected = SdkObjectNode.builder() - .putField("stringMember", scalar("foo")) - .putField("integerMember", scalar("42")) - .putField("floatMember", scalar("1234.56")) - .putField("booleanMember", scalar("true")) - .putField("nullMember", nullNode()) - .build(); - assertThat(node).isInstanceOf(SdkObjectNode.class) - .isEqualTo(expected); - } - - @Test - public void nestedObject_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("{" - + " \"structMember\": {" - + " \"floatMember\": 1234.56," - + " \"booleanMember\": true," - + " \"nullMember\": null" - + " }," - + " \"integerMember\": 42" - + "}"); - - SdkObjectNode expected = SdkObjectNode.builder() - .putField("structMember", - SdkObjectNode.builder() - .putField("floatMember", scalar("1234.56")) - .putField("booleanMember", scalar("true")) - .putField("nullMember", nullNode()) - .build()) - .putField("integerMember", scalar("42")) - .build(); - assertThat(node).isInstanceOf(SdkObjectNode.class) - .isEqualTo(expected); - } - - @Test - public void emptyArray_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("[]"); - SdkArrayNode expected = SdkArrayNode.builder().build(); - assertThat(node).isInstanceOf(SdkArrayNode.class) - .isEqualTo(expected); - } - - @Test - public void arrayOfScalars_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("[\"foo\", 42, null, false, 1234.56]"); - SdkArrayNode expected = SdkArrayNode.builder() - .addItem(scalar("foo")) - .addItem(scalar("42")) - .addItem(nullNode()) - .addItem(scalar("false")) - .addItem(scalar("1234.56")) - 
.build(); - assertThat(node).isInstanceOf(SdkArrayNode.class) - .isEqualTo(expected); - } - - @Test - public void nestedArray_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("[[\"valOne\", \"valTwo\"], [\"valThree\", \"valFour\"]]"); - SdkArrayNode expected = SdkArrayNode.builder() - .addItem(array(scalar("valOne"), scalar("valTwo"))) - .addItem(array(scalar("valThree"), scalar("valFour"))) - .build(); - assertThat(node).isInstanceOf(SdkArrayNode.class) - .isEqualTo(expected); - } - - @Test - public void complexObject_ParsedCorrectly() throws IOException { - SdkJsonNode node = parse("{" - + " \"stringMember\":\"foo\"," - + " \"deeplyNestedArray\":[" - + " [\"valOne\", 42, null]," - + " \"valTwo\"," - + " [" - + " []," - + " [\"valThree\"]" - + " ]" - + " ]," - + " \"deeplyNestedObject\":{\n" - + " \"deeplyNestedArray\":[" - + " [\"valOne\", 42, null]," - + " \"valTwo\"," - + " [" - + " []," - + " [\"valThree\"]" - + " ]" - + " ]," - + " \"nestedObject\":{" - + " \"stringMember\":\"foo\"," - + " \"integerMember\":42," - + " \"floatMember\":1234.56," - + " \"booleanMember\":true," - + " \"furtherNestedObject\":{" - + " \"stringMember\":\"foo\"," - + " \"arrayMember\":[" - + " \"valOne\"," - + " \"valTwo\"" - + " ],\n" - + " \"nullMember\":null" - + " }" - + " }" - + " }" - + "}"); - SdkArrayNode deeplyNestedArray = array( - array(scalar("valOne"), scalar("42"), nullNode()), - scalar("valTwo"), - array(array(), array(scalar("valThree"))) - ); - - SdkObjectNode furtherNestedObject = - SdkObjectNode.builder() - .putField("stringMember", scalar("foo")) - .putField("arrayMember", array(scalar("valOne"), scalar("valTwo"))) - .putField("nullMember", nullNode()) - .build(); - - SdkObjectNode deeplyNestedObject = - SdkObjectNode.builder() - .putField("deeplyNestedArray", deeplyNestedArray) - .putField("nestedObject", - SdkObjectNode.builder() - .putField("stringMember", scalar("foo")) - .putField("integerMember", scalar("42")) - .putField("floatMember", 
scalar("1234.56")) - .putField("booleanMember", scalar("true")) - .putField("furtherNestedObject", furtherNestedObject) - .build()) - .build(); - - SdkObjectNode expected = SdkObjectNode.builder() - .putField("stringMember", scalar("foo")) - .putField("deeplyNestedArray", deeplyNestedArray) - .putField("deeplyNestedObject", - deeplyNestedObject) - .build(); - assertThat(node).isInstanceOf(SdkObjectNode.class) - .isEqualTo(expected); - } - - private SdkNullNode nullNode() { - return SdkNullNode.instance(); - } - - private SdkScalarNode scalar(String value) { - return SdkScalarNode.create(value); - } - - private SdkArrayNode array(SdkJsonNode... nodes) { - SdkArrayNode.Builder builder = SdkArrayNode.builder(); - Arrays.stream(nodes).forEach(builder::addItem); - return builder.build(); - } - - private SdkJsonNode parse(String json) throws IOException { - return parser.parse(new StringInputStream(json)); - } - - -} diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index 8d2928f12ae0..76bea1fc812b 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index f87518ac87cc..a726698f90fa 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/HeaderMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/HeaderMarshaller.java index 8d377395d488..102d4239d3c8 100644 --- 
a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/HeaderMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/HeaderMarshaller.java @@ -15,11 +15,15 @@ package software.amazon.awssdk.protocols.xml.internal.marshall; +import static software.amazon.awssdk.utils.CollectionUtils.isNullOrEmpty; + import java.time.Instant; +import java.util.List; import java.util.Map; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.protocol.MarshallLocation; +import software.amazon.awssdk.core.traits.ListTrait; import software.amazon.awssdk.protocols.core.ValueToStringConverter; @SdkInternalApi @@ -62,10 +66,30 @@ public void marshall(Map map, XmlMarshallerContext context, String pa @Override protected boolean shouldEmit(Map map) { - return map != null && !map.isEmpty(); + return !isNullOrEmpty(map); } }; + public static final XmlMarshaller> LIST = new SimpleHeaderMarshaller>(null) { + @Override + public void marshall(List list, XmlMarshallerContext context, String paramName, SdkField> sdkField) { + if (!shouldEmit(list)) { + return; + } + SdkField memberFieldInfo = sdkField.getRequiredTrait(ListTrait.class).memberFieldInfo(); + for (Object listValue : list) { + XmlMarshaller marshaller = context.marshallerRegistry().getMarshaller(MarshallLocation.HEADER, listValue); + marshaller.marshall(listValue, context, paramName, memberFieldInfo); + } + } + + @Override + protected boolean shouldEmit(List list) { + // Null or empty lists cannot be meaningfully (or safely) represented in an HTTP header message since header-fields + // must typically have a non-empty field-value. 
https://datatracker.ietf.org/doc/html/rfc7230#section-3.2 + return !isNullOrEmpty(list); + } + }; private HeaderMarshaller() { } @@ -83,7 +107,7 @@ public void marshall(T val, XmlMarshallerContext context, String paramName, SdkF return; } - context.request().putHeader(paramName, converter.convert(val, sdkField)); + context.request().appendHeader(paramName, converter.convert(val, sdkField)); } protected boolean shouldEmit(T val) { diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/QueryParamMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/QueryParamMarshaller.java index 43bde6641d14..5c354009fe9e 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/QueryParamMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/QueryParamMarshaller.java @@ -60,8 +60,7 @@ public final class QueryParamMarshaller { return; } - MapTrait mapTrait = sdkField.getOptionalTrait(MapTrait.class) - .orElseThrow(() -> new IllegalStateException("SdkField of list type is missing List trait")); + MapTrait mapTrait = sdkField.getRequiredTrait(MapTrait.class); SdkField valueField = mapTrait.valueFieldInfo(); for (Map.Entry entry : map.entrySet()) { diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlPayloadMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlPayloadMarshaller.java index ebf247c23812..a81d8b02d6ea 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlPayloadMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlPayloadMarshaller.java @@ -81,9 +81,7 @@ public void 
marshall(List val, XmlMarshallerContext context, String paramName @Override public void marshall(List list, XmlMarshallerContext context, String paramName, SdkField> sdkField, ValueToStringConverter.ValueToString> converter) { - ListTrait listTrait = sdkField - .getOptionalTrait(ListTrait.class) - .orElseThrow(() -> new IllegalStateException(paramName + " member is missing ListTrait")); + ListTrait listTrait = sdkField.getRequiredTrait(ListTrait.class); if (!listTrait.isFlattened()) { context.xmlGenerator().startElement(paramName); @@ -125,8 +123,7 @@ protected boolean shouldEmit(List list, String paramName) { public void marshall(Map map, XmlMarshallerContext context, String paramName, SdkField> sdkField, ValueToStringConverter.ValueToString> converter) { - MapTrait mapTrait = sdkField.getOptionalTrait(MapTrait.class) - .orElseThrow(() -> new IllegalStateException(paramName + " member is missing MapTrait")); + MapTrait mapTrait = sdkField.getRequiredTrait(MapTrait.class); for (Map.Entry entry : map.entrySet()) { context.xmlGenerator().startElement("entry"); diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java index 3173c343cd99..09352806eac1 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java @@ -176,6 +176,7 @@ private static XmlMarshallerRegistry createMarshallerRegistry() { .headerMarshaller(MarshallingType.BOOLEAN, HeaderMarshaller.BOOLEAN) .headerMarshaller(MarshallingType.INSTANT, HeaderMarshaller.INSTANT) .headerMarshaller(MarshallingType.MAP, HeaderMarshaller.MAP) + .headerMarshaller(MarshallingType.LIST, 
HeaderMarshaller.LIST) .headerMarshaller(MarshallingType.NULL, XmlMarshaller.NULL) .queryParamMarshaller(MarshallingType.STRING, QueryParamMarshaller.STRING) diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/HeaderUnmarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/HeaderUnmarshaller.java index 08a2da0981aa..13f135057ae1 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/HeaderUnmarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/HeaderUnmarshaller.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.protocols.xml.internal.unmarshall; +import static java.util.stream.Collectors.toList; import static software.amazon.awssdk.utils.StringUtils.replacePrefixIgnoreCase; import static software.amazon.awssdk.utils.StringUtils.startsWithIgnoreCase; @@ -26,6 +27,7 @@ import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.protocols.core.StringToValueConverter; import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; +import software.amazon.awssdk.utils.http.SdkHttpUtils; @SdkInternalApi public final class HeaderUnmarshaller { @@ -39,6 +41,7 @@ public final class HeaderUnmarshaller { public static final XmlUnmarshaller INSTANT = new SimpleHeaderUnmarshaller<>(XmlProtocolUnmarshaller.INSTANT_STRING_TO_VALUE); + // Only supports string value type public static final XmlUnmarshaller> MAP = ((context, content, field) -> { Map result = new HashMap<>(); context.response().headers().entrySet().stream() @@ -48,6 +51,11 @@ public final class HeaderUnmarshaller { return result; }); + // Only supports string value type + public static final XmlUnmarshaller> LIST = (context, content, field) -> { + return SdkHttpUtils.allMatchingHeaders(context.response().headers(), 
field.locationName()).collect(toList()); + }; + private HeaderUnmarshaller() { } diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlProtocolUnmarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlProtocolUnmarshaller.java index 813a9f4467c9..b29521b58853 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlProtocolUnmarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlProtocolUnmarshaller.java @@ -150,6 +150,7 @@ private static XmlUnmarshallerRegistry createUnmarshallerRegistry() { .headerUnmarshaller(MarshallingType.INSTANT, HeaderUnmarshaller.INSTANT) .headerUnmarshaller(MarshallingType.FLOAT, HeaderUnmarshaller.FLOAT) .headerUnmarshaller(MarshallingType.MAP, HeaderUnmarshaller.MAP) + .headerUnmarshaller(MarshallingType.LIST, HeaderUnmarshaller.LIST) .payloadUnmarshaller(MarshallingType.STRING, XmlPayloadUnmarshaller.STRING) .payloadUnmarshaller(MarshallingType.INTEGER, XmlPayloadUnmarshaller.INTEGER) diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 508ab39c38b9..767695dac00a 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 @@ -34,7 +34,6 @@ aws-query-protocol aws-json-protocol aws-cbor-protocol - aws-ion-protocol aws-xml-protocol protocol-core diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index eddeea6f9f08..1f66a412d496 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index c55096877b2c..d9aa7d6b00fe 100644 --- 
a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT regions @@ -49,12 +49,9 @@ ${awsjavasdk.version} - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations + software.amazon.awssdk + json-utils + ${awsjavasdk.version} org.slf4j diff --git a/core/regions/src/it/java/software/amazon/awssdk/regions/util/EC2MetadataUtilsIntegrationTest.java b/core/regions/src/it/java/software/amazon/awssdk/regions/util/EC2MetadataUtilsIntegrationTest.java index 07d94e80bf58..b19a2a3703f0 100644 --- a/core/regions/src/it/java/software/amazon/awssdk/regions/util/EC2MetadataUtilsIntegrationTest.java +++ b/core/regions/src/it/java/software/amazon/awssdk/regions/util/EC2MetadataUtilsIntegrationTest.java @@ -49,39 +49,6 @@ public static void cleanUp() throws IOException { System.clearProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property()); } - @Test - public void testIamInfo() { - EC2MetadataUtils.IamInfo info = EC2MetadataUtils - .getIamInstanceProfileInfo(); - - Assert.assertEquals("Success", info.code); - Assert.assertNull(info.message); - Assert.assertEquals("2014-04-07T08:18:41Z", info.lastUpdated); - Assert.assertEquals("foobar", info.instanceProfileArn); - Assert.assertEquals("moobily", info.instanceProfileId); - } - - @Test - public void testIamCredentials() { - Map map = EC2MetadataUtils - .getIamSecurityCredentials(); - - Assert.assertEquals(2, map.size()); - - for (Map.Entry entry : map - .entrySet()) { - - Assert.assertNotNull(entry.getKey()); - Assert.assertNotNull(entry.getValue().code); - Assert.assertNotNull(entry.getValue().lastUpdated); - Assert.assertEquals("AWS-HMAC", entry.getValue().type); - Assert.assertEquals("foobar", entry.getValue().accessKeyId); - Assert.assertEquals("moobily", entry.getValue().secretAccessKey); - Assert.assertEquals("beebop", entry.getValue().token); - Assert.assertNotNull(entry.getValue().expiration); 
- } - } - @Test(expected = SdkClientException.class) public void ec2MetadataDisabled_shouldThrowException() { try { @@ -92,6 +59,12 @@ public void ec2MetadataDisabled_shouldThrowException() { } } + @Test + public void testInstanceSignature() { + String signature = EC2MetadataUtils.getInstanceSignature(); + Assert.assertEquals("foobar", signature); + } + @Test public void testInstanceInfo() { EC2MetadataUtils.InstanceInfo info = EC2MetadataUtils.getInstanceInfo(); @@ -111,10 +84,4 @@ public void testInstanceInfo() { Assert.assertEquals("bar", info.getDevpayProductCodes()[0]); Assert.assertEquals("qaz", info.getMarketplaceProductCodes()[0]); } - - @Test - public void testInstanceSignature() { - String signature = EC2MetadataUtils.getInstanceSignature(); - Assert.assertEquals("foobar", signature); - } } diff --git a/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/EC2MetadataUtils.java b/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/EC2MetadataUtils.java index dd391338844b..13a2dbb245eb 100644 --- a/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/EC2MetadataUtils.java +++ b/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/EC2MetadataUtils.java @@ -15,16 +15,9 @@ package software.amazon.awssdk.regions.internal.util; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.PropertyNamingStrategy; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; -import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -40,7 +33,8 @@ import software.amazon.awssdk.core.exception.SdkClientException; import 
software.amazon.awssdk.core.exception.SdkServiceException; import software.amazon.awssdk.core.util.SdkUserAgent; -import software.amazon.awssdk.core.util.json.JacksonUtils; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; import software.amazon.awssdk.regions.util.HttpResourcesUtils; import software.amazon.awssdk.regions.util.ResourcesEndpointProvider; @@ -67,6 +61,7 @@ //TODO: cleanup @SdkInternalApi public final class EC2MetadataUtils { + private static final JsonNodeParser JSON_PARSER = JsonNode.parser(); /** Default resource path for credentials in the Amazon EC2 Instance Metadata Service. */ private static final String REGION = "region"; @@ -80,19 +75,16 @@ public final class EC2MetadataUtils { private static final int DEFAULT_QUERY_RETRIES = 3; private static final int MINIMUM_RETRY_WAIT_TIME_MILLISECONDS = 250; - private static final ObjectMapper MAPPER = new ObjectMapper(); private static final Logger log = LoggerFactory.getLogger(EC2MetadataUtils.class); - private static Map cache = new ConcurrentHashMap<>(); + private static final Map CACHE = new ConcurrentHashMap<>(); private static final InstanceProviderTokenEndpointProvider TOKEN_ENDPOINT_PROVIDER = new InstanceProviderTokenEndpointProvider(); - private EC2MetadataUtils() { - } + private static final Ec2MetadataConfigProvider EC2_METADATA_CONFIG_PROVIDER = Ec2MetadataConfigProvider.builder() + .build(); - static { - MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - MAPPER.setPropertyNamingStrategy(PropertyNamingStrategy.PASCAL_CASE_TO_CAMEL_CASE); + private EC2MetadataUtils() { } /** @@ -216,57 +208,6 @@ public static List getSecurityGroups() { return getItems(EC2_METADATA_ROOT + "/security-groups"); } - /** - * Get information about the last time the instance profile was updated, - * including the instance's LastUpdated date, InstanceProfileArn, and - * InstanceProfileId. 
- */ - public static IamInfo getIamInstanceProfileInfo() { - String json = getData(EC2_METADATA_ROOT + "/iam/info"); - if (null == json) { - return null; - } - - try { - - return MAPPER.readValue(json, IamInfo.class); - - } catch (Exception e) { - log.warn("Unable to parse IAM Instance profile info (" + json - + "): " + e.getMessage(), e); - return null; - } - } - - /** - * The instance info is only guaranteed to be a JSON document per - * http://docs - * .aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html - *

- * This method is only a best attempt to capture the instance info as a - * typed object. - *

- * Get an InstanceInfo object with dynamic information about this instance. - */ - public static InstanceInfo getInstanceInfo() { - return doGetInstanceInfo(getData( - EC2_DYNAMICDATA_ROOT + INSTANCE_IDENTITY_DOCUMENT)); - } - - static InstanceInfo doGetInstanceInfo(String json) { - if (null != json) { - try { - InstanceInfo instanceInfo = JacksonUtils.fromJsonString(json, - InstanceInfo.class); - return instanceInfo; - } catch (Exception e) { - log.warn("Unable to parse dynamic EC2 instance info (" + json - + ") : " + e.getMessage(), e); - } - } - return null; - } - /** * Get the signature of the instance. */ @@ -291,45 +232,17 @@ public static String getEC2InstanceRegion() { static String doGetEC2InstanceRegion(final String json) { if (null != json) { try { - JsonNode node = MAPPER.readTree(json.getBytes(StandardCharsets.UTF_8)); - JsonNode region = node.findValue(REGION); - return region.asText(); + return JSON_PARSER.parse(json) + .field(REGION) + .map(JsonNode::text) + .orElseThrow(() -> new IllegalStateException("Region not included in metadata.")); } catch (Exception e) { - log.warn("Unable to parse EC2 instance info (" + json - + ") : " + e.getMessage(), e); + log.warn("Unable to parse EC2 instance info (" + json + ") : " + e.getMessage(), e); } } return null; } - /** - * Returns the temporary security credentials (AccessKeyId, SecretAccessKey, - * SessionToken, and Expiration) associated with the IAM roles on the - * instance. 
- */ - public static Map getIamSecurityCredentials() { - Map credentialsInfoMap = new HashMap<>(); - - List credentials = getItems(EC2_METADATA_ROOT - + "/iam/security-credentials"); - - if (null != credentials) { - for (String credential : credentials) { - String json = getData(EC2_METADATA_ROOT - + "/iam/security-credentials/" + credential); - try { - IamSecurityCredential credentialInfo = MAPPER - .readValue(json, IamSecurityCredential.class); - credentialsInfoMap.put(credential, credentialInfo); - } catch (Exception e) { - log.warn("Unable to process the credential (" + credential - + "). " + e.getMessage(), e); - } - } - } - return credentialsInfoMap; - } - /** * Get the virtual devices associated with the ami, root, ebs, and swap. */ @@ -370,6 +283,63 @@ public static String getUserData() { return getData(EC2_USERDATA_ROOT); } + /** + * Retrieve some of the data from http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html as a typed + * object. This entire class will be removed as part of https://github.com/aws/aws-sdk-java-v2/issues/61, so don't rely on + * this sticking around. + * + * This should not be removed until https://github.com/aws/aws-sdk-java-v2/issues/61 is implemented. 
+ */ + public static InstanceInfo getInstanceInfo() { + return doGetInstanceInfo(getData(EC2_DYNAMICDATA_ROOT + INSTANCE_IDENTITY_DOCUMENT)); + } + + static InstanceInfo doGetInstanceInfo(String json) { + if (json != null) { + try { + Map jsonNode = JSON_PARSER.parse(json).asObject(); + return new InstanceInfo(stringValue(jsonNode.get("pendingTime")), + stringValue(jsonNode.get("instanceType")), + stringValue(jsonNode.get("imageId")), + stringValue(jsonNode.get("instanceId")), + stringArrayValue(jsonNode.get("billingProducts")), + stringValue(jsonNode.get("architecture")), + stringValue(jsonNode.get("accountId")), + stringValue(jsonNode.get("kernelId")), + stringValue(jsonNode.get("ramdiskId")), + stringValue(jsonNode.get("region")), + stringValue(jsonNode.get("version")), + stringValue(jsonNode.get("availabilityZone")), + stringValue(jsonNode.get("privateIp")), + stringArrayValue(jsonNode.get("devpayProductCodes")), + stringArrayValue(jsonNode.get("marketplaceProductCodes"))); + } catch (Exception e) { + log.warn("Unable to parse dynamic EC2 instance info (" + json + ") : " + e.getMessage(), e); + } + } + return null; + } + + private static String stringValue(JsonNode jsonNode) { + if (jsonNode == null || !jsonNode.isString()) { + return null; + } + + return jsonNode.asString(); + } + + private static String[] stringArrayValue(JsonNode jsonNode) { + if (jsonNode == null || !jsonNode.isArray()) { + return null; + } + + return jsonNode.asArray() + .stream() + .filter(JsonNode::isString) + .map(JsonNode::asString) + .toArray(String[]::new); + } + public static String getData(String path) { return getData(path, DEFAULT_QUERY_RETRIES); } @@ -392,7 +362,7 @@ public static List getItems(String path, int tries) { @SdkTestInternalApi static void clearCache() { - cache.clear(); + CACHE.clear(); } private static List getItems(String path, int tries, boolean slurp) { @@ -409,7 +379,7 @@ private static List getItems(String path, int tries, boolean slurp) { String token = 
getToken(); try { - String hostAddress = SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.getStringValueOrThrow(); + String hostAddress = EC2_METADATA_CONFIG_PROVIDER.getEndpoint(); String response = doReadResource(new URI(hostAddress + path), token); if (slurp) { items = Collections.singletonList(response); @@ -468,10 +438,10 @@ private static String fetchData(String path, boolean force) { } try { - if (force || !cache.containsKey(path)) { - cache.put(path, getData(path)); + if (force || !CACHE.containsKey(path)) { + CACHE.put(path, getData(path)); } - return cache.get(path); + return CACHE.get(path); } catch (SdkClientException e) { throw e; } catch (RuntimeException e) { @@ -479,165 +449,6 @@ private static String fetchData(String path, boolean force) { } } - /** - * Information about the last time the instance profile was updated, - * including the instance's LastUpdated date, InstanceProfileArn, and - * InstanceProfileId. - */ - public static class IamInfo { - public String code; - public String message; - public String lastUpdated; - public String instanceProfileArn; - public String instanceProfileId; - } - - /** - * The temporary security credentials (AccessKeyId, SecretAccessKey, - * SessionToken, and Expiration) associated with the IAM role. 
- */ - public static class IamSecurityCredential { - public String code; - public String message; - public String lastUpdated; - public String type; - public String accessKeyId; - public String secretAccessKey; - public String token; - public String expiration; - - /** - * @deprecated because it is spelled incorrectly - * @see #accessKeyId - */ - @Deprecated - public String secretAcessKey; - } - - /** - * This POJO is a best attempt to capture the instance info which is only - * guaranteed to be a JSON document per - * http://docs.aws.amazon.com/AWSEC2/latest - * /UserGuide/ec2-instance-metadata.html - * - * Instance info includes dynamic information about the current instance - * such as region, instanceId, private IP address, etc. - */ - public static class InstanceInfo { - private final String pendingTime; - private final String instanceType; - private final String imageId; - private final String instanceId; - private final String[] billingProducts; - private final String architecture; - private final String accountId; - private final String kernelId; - private final String ramdiskId; - private final String region; - private final String version; - private final String availabilityZone; - private final String privateIp; - private final String[] devpayProductCodes; - private final String[] marketplaceProductCodes; - - @JsonCreator - public InstanceInfo( - @JsonProperty(value = "pendingTime", required = true) String pendingTime, - @JsonProperty(value = "instanceType", required = true) String instanceType, - @JsonProperty(value = "imageId", required = true) String imageId, - @JsonProperty(value = "instanceId", required = true) String instanceId, - @JsonProperty(value = "billingProducts", required = false) String[] billingProducts, - @JsonProperty(value = "architecture", required = true) String architecture, - @JsonProperty(value = "accountId", required = true) String accountId, - @JsonProperty(value = "kernelId", required = true) String kernelId, - 
@JsonProperty(value = "ramdiskId", required = false) String ramdiskId, - @JsonProperty(value = REGION, required = true) String region, - @JsonProperty(value = "version", required = true) String version, - @JsonProperty(value = "availabilityZone", required = true) String availabilityZone, - @JsonProperty(value = "privateIp", required = true) String privateIp, - @JsonProperty(value = "devpayProductCodes", required = false) String[] devpayProductCodes, - @JsonProperty(value = "marketplaceProductCodes", required = false) String[] marketplaceProductCodes) { - this.pendingTime = pendingTime; - this.instanceType = instanceType; - this.imageId = imageId; - this.instanceId = instanceId; - this.billingProducts = billingProducts == null - ? null : billingProducts.clone(); - this.architecture = architecture; - this.accountId = accountId; - this.kernelId = kernelId; - this.ramdiskId = ramdiskId; - this.region = region; - this.version = version; - this.availabilityZone = availabilityZone; - this.privateIp = privateIp; - this.devpayProductCodes = devpayProductCodes == null - ? null : devpayProductCodes.clone(); - this.marketplaceProductCodes = marketplaceProductCodes == null - ? null : marketplaceProductCodes.clone(); - } - - public String getPendingTime() { - return pendingTime; - } - - public String getInstanceType() { - return instanceType; - } - - public String getImageId() { - return imageId; - } - - public String getInstanceId() { - return instanceId; - } - - public String[] getBillingProducts() { - return billingProducts == null ? 
null : billingProducts.clone(); - } - - public String getArchitecture() { - return architecture; - } - - public String getAccountId() { - return accountId; - } - - public String getKernelId() { - return kernelId; - } - - public String getRamdiskId() { - return ramdiskId; - } - - public String getRegion() { - return region; - } - - public String getVersion() { - return version; - } - - public String getAvailabilityZone() { - return availabilityZone; - } - - public String getPrivateIp() { - return privateIp; - } - - public String[] getDevpayProductCodes() { - return devpayProductCodes == null ? null : devpayProductCodes.clone(); - } - - public String[] getMarketplaceProductCodes() { - return marketplaceProductCodes == null ? null : marketplaceProductCodes.clone(); - } - } - /** * All of the metada associated with a network interface on the instance. */ @@ -833,4 +644,120 @@ public Map headers() { return requestHeaders; } } + + + public static class InstanceInfo { + private final String pendingTime; + private final String instanceType; + private final String imageId; + private final String instanceId; + private final String[] billingProducts; + private final String architecture; + private final String accountId; + private final String kernelId; + private final String ramdiskId; + private final String region; + private final String version; + private final String availabilityZone; + private final String privateIp; + private final String[] devpayProductCodes; + private final String[] marketplaceProductCodes; + + public InstanceInfo( + String pendingTime, + String instanceType, + String imageId, + String instanceId, + String[] billingProducts, + String architecture, + String accountId, + String kernelId, + String ramdiskId, + String region, + String version, + String availabilityZone, + String privateIp, + String[] devpayProductCodes, + String[] marketplaceProductCodes) { + + this.pendingTime = pendingTime; + this.instanceType = instanceType; + this.imageId = imageId; + 
this.instanceId = instanceId; + this.billingProducts = billingProducts == null + ? null : billingProducts.clone(); + this.architecture = architecture; + this.accountId = accountId; + this.kernelId = kernelId; + this.ramdiskId = ramdiskId; + this.region = region; + this.version = version; + this.availabilityZone = availabilityZone; + this.privateIp = privateIp; + this.devpayProductCodes = devpayProductCodes == null + ? null : devpayProductCodes.clone(); + this.marketplaceProductCodes = marketplaceProductCodes == null + ? null : marketplaceProductCodes.clone(); + } + + public String getPendingTime() { + return pendingTime; + } + + public String getInstanceType() { + return instanceType; + } + + public String getImageId() { + return imageId; + } + + public String getInstanceId() { + return instanceId; + } + + public String[] getBillingProducts() { + return billingProducts == null ? null : billingProducts.clone(); + } + + public String getArchitecture() { + return architecture; + } + + public String getAccountId() { + return accountId; + } + + public String getKernelId() { + return kernelId; + } + + public String getRamdiskId() { + return ramdiskId; + } + + public String getRegion() { + return region; + } + + public String getVersion() { + return version; + } + + public String getAvailabilityZone() { + return availabilityZone; + } + + public String getPrivateIp() { + return privateIp; + } + + public String[] getDevpayProductCodes() { + return devpayProductCodes == null ? null : devpayProductCodes.clone(); + } + + public String[] getMarketplaceProductCodes() { + return marketplaceProductCodes == null ? 
null : marketplaceProductCodes.clone(); + } + } } diff --git a/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/Ec2MetadataConfigProvider.java b/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/Ec2MetadataConfigProvider.java new file mode 100644 index 000000000000..17f0a0465c41 --- /dev/null +++ b/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/Ec2MetadataConfigProvider.java @@ -0,0 +1,161 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.regions.internal.util; + +import java.util.Optional; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.profiles.ProfileProperty; + +@SdkInternalApi +// TODO: Remove or consolidate this class with the one from the auth module. +// There's currently no good way for both auth and regions to share the same +// class since there's no suitable common dependency between the two where this +// can live. Ideally, we can do this when the EC2MetadataUtils is replaced with +// the IMDS client. 
+public final class Ec2MetadataConfigProvider { + /** Default IPv4 endpoint for the Amazon EC2 Instance Metadata Service. */ + private static final String EC2_METADATA_SERVICE_URL_IPV4 = "http://169.254.169.254"; + + /** Default IPv6 endpoint for the Amazon EC2 Instance Metadata Service. */ + private static final String EC2_METADATA_SERVICE_URL_IPV6 = "http://[fd00:ec2::254]"; + + private final Supplier profileFile; + private final String profileName; + + private Ec2MetadataConfigProvider(Builder builder) { + this.profileFile = builder.profileFile; + this.profileName = builder.profileName; + } + + public enum EndpointMode { + IPV4, + IPV6, + ; + + public static EndpointMode fromValue(String s) { + if (s == null) { + return null; + } + + for (EndpointMode value : EndpointMode.values()) { + if (value.name().equalsIgnoreCase(s)) { + return value; + } + } + + throw new IllegalArgumentException("Unrecognized value for endpoint mode: " + s); + } + } + + public String getEndpoint() { + String endpointOverride = getEndpointOverride(); + if (endpointOverride != null) { + return endpointOverride; + } + + EndpointMode endpointMode = getEndpointMode(); + switch (endpointMode) { + case IPV4: + return EC2_METADATA_SERVICE_URL_IPV4; + case IPV6: + return EC2_METADATA_SERVICE_URL_IPV6; + default: + throw SdkClientException.create("Unknown endpoint mode: " + endpointMode); + } + } + + public EndpointMode getEndpointMode() { + Optional endpointMode = SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.getNonDefaultStringValue(); + if (endpointMode.isPresent()) { + return EndpointMode.fromValue(endpointMode.get()); + } + + return configFileEndpointMode().orElseGet(() -> + EndpointMode.fromValue(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE.defaultValue())); + } + + public String getEndpointOverride() { + Optional endpointOverride = SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.getNonDefaultStringValue(); + if (endpointOverride.isPresent()) { + return 
endpointOverride.get(); + } + + Optional configFileValue = configFileEndpointOverride(); + + return configFileValue.orElse(null); + } + + public static Builder builder() { + return new Builder(); + } + + private Optional configFileEndpointMode() { + return resolveProfile().flatMap(p -> p.property(ProfileProperty.EC2_METADATA_SERVICE_ENDPOINT_MODE)) + .map(EndpointMode::fromValue); + } + + private Optional configFileEndpointOverride() { + return resolveProfile().flatMap(p -> p.property(ProfileProperty.EC2_METADATA_SERVICE_ENDPOINT)); + } + + private Optional resolveProfile() { + ProfileFile profileFileToUse = resolveProfileFile(); + String profileNameToUse = resolveProfileName(); + + return profileFileToUse.profile(profileNameToUse); + } + + private ProfileFile resolveProfileFile() { + if (profileFile != null) { + return profileFile.get(); + } + + return ProfileFile.defaultProfileFile(); + } + + private String resolveProfileName() { + if (profileName != null) { + return profileName; + } + + return ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow(); + } + + public static class Builder { + private Supplier profileFile; + private String profileName; + + public Builder profileFile(Supplier profileFile) { + this.profileFile = profileFile; + return this; + } + + public Builder profileName(String profileName) { + this.profileName = profileName; + return this; + } + + public Ec2MetadataConfigProvider build() { + return new Ec2MetadataConfigProvider(this); + } + } +} \ No newline at end of file diff --git a/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/InstanceProviderTokenEndpointProvider.java b/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/InstanceProviderTokenEndpointProvider.java index e19b4c07f5fc..0eb655cac855 100644 --- a/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/InstanceProviderTokenEndpointProvider.java +++ 
b/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/InstanceProviderTokenEndpointProvider.java @@ -19,7 +19,6 @@ import java.util.HashMap; import java.util.Map; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.util.SdkUserAgent; import software.amazon.awssdk.regions.util.ResourcesEndpointProvider; @@ -29,9 +28,12 @@ public final class InstanceProviderTokenEndpointProvider implements ResourcesEnd private static final String EC2_METADATA_TOKEN_TTL_HEADER = "x-aws-ec2-metadata-token-ttl-seconds"; private static final String DEFAULT_TOKEN_TTL = "21600"; + private static final Ec2MetadataConfigProvider EC2_METADATA_CONFIG_PROVIDER = Ec2MetadataConfigProvider.builder() + .build(); + @Override public URI endpoint() { - String host = SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.getStringValueOrThrow(); + String host = EC2_METADATA_CONFIG_PROVIDER.getEndpoint(); if (host.endsWith("/")) { host = host.substring(0, host.length() - 1); } diff --git a/core/regions/src/main/java/software/amazon/awssdk/regions/util/HttpResourcesUtils.java b/core/regions/src/main/java/software/amazon/awssdk/regions/util/HttpResourcesUtils.java index d87ed42aa076..f3abfcc92506 100644 --- a/core/regions/src/main/java/software/amazon/awssdk/regions/util/HttpResourcesUtils.java +++ b/core/regions/src/main/java/software/amazon/awssdk/regions/util/HttpResourcesUtils.java @@ -15,17 +15,18 @@ package software.amazon.awssdk.regions.util; -import com.fasterxml.jackson.databind.JsonNode; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URI; +import java.util.Optional; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.exception.SdkServiceException; -import 
software.amazon.awssdk.core.util.json.JacksonUtils; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; import software.amazon.awssdk.regions.internal.util.ConnectionUtils; import software.amazon.awssdk.utils.IoUtils; @@ -33,6 +34,7 @@ public final class HttpResourcesUtils { private static final Logger log = LoggerFactory.getLogger(HttpResourcesUtils.class); + private static final JsonNodeParser JSON_PARSER = JsonNode.parser(); private static volatile HttpResourcesUtils instance; @@ -154,21 +156,18 @@ private void handleErrorResponse(InputStream errorStream, int statusCode, String String errorResponse = IoUtils.toUtf8String(errorStream); try { - JsonNode node = JacksonUtils.jsonNodeOf(errorResponse); - JsonNode code = node.get("code"); - JsonNode message = node.get("message"); - if (code != null && message != null) { - responseMessage = message.asText(); + Optional message = JSON_PARSER.parse(errorResponse).field("message"); + if (message.isPresent()) { + responseMessage = message.get().text(); } } catch (RuntimeException exception) { log.debug("Unable to parse error stream", exception); } } - SdkServiceException exception = SdkServiceException.builder() - .message(responseMessage) - .statusCode(statusCode) - .build(); - throw exception; + throw SdkServiceException.builder() + .message(responseMessage) + .statusCode(statusCode) + .build(); } } diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index d1a8a031aaae..2470d1770247 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -261,6 +261,29 @@ "us-west-2" : { } } }, + "amplify" : { + "endpoints" : { + "ap-east-1" : { }, + 
"ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "amplifybackend" : { "endpoints" : { "ap-northeast-1" : { }, @@ -678,6 +701,7 @@ }, "appflow" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -787,6 +811,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -1249,6 +1274,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -1786,6 +1812,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2041,6 +2068,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2749,6 +2777,36 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "emr-containers-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "emr-containers-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "emr-containers-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : 
"emr-containers-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "emr-containers-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3125,6 +3183,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -4287,6 +4346,7 @@ "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-1" : { }, "us-west-2" : { } } @@ -5301,6 +5361,16 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-global" }, + "route53-recovery-control-config" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "route53-recovery-control-config.us-west-2.amazonaws.com" + } + } + }, "route53domains" : { "endpoints" : { "us-east-1" : { } @@ -7535,6 +7605,7 @@ }, "dax" : { "endpoints" : { + "cn-north-1" : { }, "cn-northwest-1" : { } } }, @@ -7679,7 +7750,8 @@ }, "gamelift" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "glacier" : { @@ -7846,6 +7918,12 @@ }, "neptune" : { "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "rds.cn-north-1.amazonaws.com.cn" + }, "cn-northwest-1" : { "credentialScope" : { "region" : "cn-northwest-1" @@ -10212,6 +10290,11 @@ "us-iso-east-1" : { } } }, + "license-manager" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, "logs" : { "endpoints" : { "us-iso-east-1" : { } @@ -10453,6 +10536,11 @@ "us-isob-east-1" : { } } }, + "ds" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "dynamodb" : { "defaults" : { "protocols" : [ "http", "https" ] diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index f460ce88ca2f..0007ed001f41 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 
2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sdk-core AWS Java SDK :: SDK Core @@ -56,19 +56,10 @@ profiles ${awsjavasdk.version} - org.slf4j slf4j-api - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - org.reactivestreams reactive-streams diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkField.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkField.java index e43d94f5a2da..161754efa1f7 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkField.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkField.java @@ -122,6 +122,23 @@ public Optional getOptionalTrait(Class clzz) { return Optional.ofNullable((T) traits.get(clzz)); } + /** + * Gets the trait of the specified class, or throw {@link IllegalStateException} if not available. + * + * @param clzz Trait class to get. + * @param Type of trait. + * @return Trait instance. + * @throws IllegalStateException if trait is not present. + */ + @SuppressWarnings("unchecked") + public T getRequiredTrait(Class clzz) throws IllegalStateException { + T trait = (T) traits.get(clzz); + if (trait == null) { + throw new IllegalStateException(memberName + " member is missing " + clzz.getSimpleName()); + } + return trait; + } + /** * Checks if a given {@link Trait} is present on the field. 
* diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java index c46a37e31706..8fe7845d2f71 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java @@ -78,6 +78,8 @@ public enum SdkSystemSetting implements SystemSetting { */ AWS_EC2_METADATA_SERVICE_ENDPOINT("aws.ec2MetadataServiceEndpoint", "http://169.254.169.254"), + AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE("aws.ec2MetadataServiceEndpointMode", "IPv4"), + /** * The elastic container metadata service endpoint that should be called by the ContainerCredentialsProvider * when loading data from the container metadata service. diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 4a29c7eac90d..cade6174232b 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -259,6 +259,7 @@ private RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { RetryMode retryMode = RetryMode.resolver() .profileFile(() -> config.option(SdkClientOption.PROFILE_FILE)) .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) .resolve(); return RetryPolicy.forRetryMode(retryMode); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java index fb8177d00e94..01a45c683b8a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java 
+++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java @@ -25,6 +25,7 @@ import software.amazon.awssdk.core.ServiceConfiguration; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; @@ -149,6 +150,13 @@ public final class SdkClientOption extends ClientOption { */ public static final SdkClientOption INTERNAL_USER_AGENT = new SdkClientOption<>(String.class); + /** + * Option to specify the default retry mode. + * + * @see RetryMode.Resolver#defaultRetryMode(RetryMode) + */ + public static final SdkClientOption DEFAULT_RETRY_MODE = new SdkClientOption<>(RetryMode.class); + private SdkClientOption(Class valueClass) { super(valueClass); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/UnreliableFilterInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/UnreliableFilterInputStream.java index c41876bbfae6..2397f615f794 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/UnreliableFilterInputStream.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/UnreliableFilterInputStream.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.io.InputStream; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.util.json.JacksonUtils; +import software.amazon.awssdk.utils.ToString; /** * An internal class used solely for the purpose of testing via failure @@ -168,6 +168,15 @@ public int getResetCount() { @Override public String toString() { - return JacksonUtils.toJsonString(this); + return ToString.builder("UnreliableFilterInputStream") + .add("isFakeIoException", 
isFakeIoException) + .add("maxNumErrors", maxNumErrors) + .add("currNumErrors", currNumErrors) + .add("bytesReadBeforeException", bytesReadBeforeException) + .add("marked", marked) + .add("position", position) + .add("resetCount", resetCount) + .add("resetIntervalBeforeException", resetIntervalBeforeException) + .toString(); } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java index 458e777d589c..146d741077c9 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java @@ -91,8 +91,11 @@ public static Resolver resolver() { * Allows customizing the variables used during determination of a {@link RetryMode}. Created via {@link #resolver()}. */ public static class Resolver { + private static final RetryMode SDK_DEFAULT_RETRY_MODE = LEGACY; + private Supplier profileFile; private String profileName; + private RetryMode defaultRetryMode; private Resolver() { } @@ -114,12 +117,20 @@ public Resolver profileName(String profileName) { return this; } + /** + * Configure the {@link RetryMode} that should be used if the mode is not specified anywhere else. + */ + public Resolver defaultRetryMode(RetryMode defaultRetryMode) { + this.defaultRetryMode = defaultRetryMode; + return this; + } + /** * Resolve which retry mode should be used, based on the configured values. 
*/ public RetryMode resolve() { return OptionalUtils.firstPresent(Resolver.fromSystemSettings(), () -> fromProfileFile(profileFile, profileName)) - .orElse(RetryMode.LEGACY); + .orElseGet(this::fromDefaultMode); } private static Optional fromSystemSettings() { @@ -150,5 +161,9 @@ private static Optional fromString(String string) { throw new IllegalStateException("Unsupported retry policy mode configured: " + string); } } + + private RetryMode fromDefaultMode() { + return defaultRetryMode != null ? defaultRetryMode : SDK_DEFAULT_RETRY_MODE; + } } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/json/JacksonUtils.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/json/JacksonUtils.java deleted file mode 100644 index c379f52a3682..000000000000 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/json/JacksonUtils.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.core.util.json; - -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonParseException; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectWriter; -import java.io.IOException; -import java.io.Writer; -import software.amazon.awssdk.annotations.SdkProtectedApi; -import software.amazon.awssdk.core.exception.SdkClientException; - -@SdkProtectedApi -public final class JacksonUtils { - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - private static final ObjectWriter WRITER = OBJECT_MAPPER.writer(); - - - private static final ObjectWriter PRETTY_WRITER = OBJECT_MAPPER.writerWithDefaultPrettyPrinter(); - - static { - OBJECT_MAPPER.configure(JsonParser.Feature.ALLOW_COMMENTS, true); - OBJECT_MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - } - - private JacksonUtils() { - } - - public static String toJsonPrettyString(Object value) { - try { - return PRETTY_WRITER.writeValueAsString(value); - } catch (Exception e) { - throw new IllegalStateException(e); - } - } - - public static String toJsonString(Object value) { - try { - return WRITER.writeValueAsString(value); - } catch (Exception e) { - throw new IllegalStateException(e); - } - } - - /** - * Returns the deserialized object from the given json string and target - * class; or null if the given json string is null. 
- */ - public static T fromJsonString(String json, Class clazz) { - if (json == null) { - return null; - } - try { - return OBJECT_MAPPER.readValue(json, clazz); - } catch (Exception e) { - throw SdkClientException.builder().message("Unable to parse Json String.").cause(e).build(); - } - } - - /** - * Returns the deserialized object from the given json string and target - * class; or null if the given json string is null. Clears the JSON location in the event of an error - */ - public static T fromSensitiveJsonString(String json, Class clazz) { - if (json == null) { - return null; - } - try { - return OBJECT_MAPPER.readValue(json, clazz); - } catch (Exception e) { - // If underlying exception is a json parsing issue, clear out the location so that the exception message - // does not contain the raw json - if (e instanceof JsonParseException) { - ((JsonParseException) e).clearLocation(); - } - throw SdkClientException.builder().message("Unable to parse Json String.").cause(e).build(); - } - } - - public static JsonNode jsonNodeOf(String json) { - return fromJsonString(json, JsonNode.class); - } - - public static JsonNode sensitiveJsonNodeOf(String json) { - return fromSensitiveJsonString(json, JsonNode.class); - } - - public static JsonGenerator jsonGeneratorOf(Writer writer) throws IOException { - return new JsonFactory().createGenerator(writer); - } -} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/http/.MockServerTestBase.java.swp b/core/sdk-core/src/test/java/software/amazon/awssdk/core/http/.MockServerTestBase.java.swp deleted file mode 100644 index 941bbee85892..000000000000 Binary files a/core/sdk-core/src/test/java/software/amazon/awssdk/core/http/.MockServerTestBase.java.swp and /dev/null differ diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/RetryModeTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/RetryModeTest.java index 4020e955bb21..cc9ea5511148 100644 --- 
a/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/RetryModeTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/RetryModeTest.java @@ -22,9 +22,9 @@ import java.nio.file.Paths; import java.util.Arrays; import java.util.Collection; +import java.util.concurrent.Callable; import org.junit.After; import org.junit.Before; -import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -44,25 +44,26 @@ public class RetryModeTest { public static Collection data() { return Arrays.asList(new Object[] { // Test defaults - new TestData(null, null, null, RetryMode.LEGACY), - new TestData(null, null, "PropertyNotSet", RetryMode.LEGACY), + new TestData(null, null, null, null, RetryMode.LEGACY), + new TestData(null, null, "PropertyNotSet", null, RetryMode.LEGACY), // Test precedence - new TestData("standard", "legacy", "PropertySetToLegacy", RetryMode.STANDARD), - new TestData("standard", null, null, RetryMode.STANDARD), - new TestData(null, "standard", "PropertySetToLegacy", RetryMode.STANDARD), - new TestData(null, "standard", null, RetryMode.STANDARD), - new TestData(null, null, "PropertySetToStandard", RetryMode.STANDARD), + new TestData("standard", "legacy", "PropertySetToLegacy", RetryMode.LEGACY, RetryMode.STANDARD), + new TestData("standard", null, null, RetryMode.LEGACY, RetryMode.STANDARD), + new TestData(null, "standard", "PropertySetToLegacy", RetryMode.LEGACY, RetryMode.STANDARD), + new TestData(null, "standard", null, RetryMode.LEGACY, RetryMode.STANDARD), + new TestData(null, null, "PropertySetToStandard", RetryMode.LEGACY, RetryMode.STANDARD), + new TestData(null, null, null, RetryMode.STANDARD, RetryMode.STANDARD), // Test invalid values - new TestData("wrongValue", null, null, null), - new TestData(null, "wrongValue", null, null), - new TestData(null, null, "PropertySetToUnsupportedValue", null), + new TestData("wrongValue", null, null, null, 
IllegalStateException.class), + new TestData(null, "wrongValue", null, null, IllegalStateException.class), + new TestData(null, null, "PropertySetToUnsupportedValue", null, IllegalStateException.class), // Test capitalization standardization - new TestData("sTaNdArD", null, null, RetryMode.STANDARD), - new TestData(null, "sTaNdArD", null, RetryMode.STANDARD), - new TestData(null, null, "PropertyMixedCase", RetryMode.STANDARD), + new TestData("sTaNdArD", null, null, null, RetryMode.STANDARD), + new TestData(null, "sTaNdArD", null, null, RetryMode.STANDARD), + new TestData(null, null, "PropertyMixedCase", null, RetryMode.STANDARD), }); } @@ -76,7 +77,7 @@ public void methodSetup() { } @Test - public void differentCombinationOfConfigs_shouldResolveCorrectly() { + public void differentCombinationOfConfigs_shouldResolveCorrectly() throws Exception { if (testData.envVarValue != null) { ENVIRONMENT_VARIABLE_HELPER.set(SdkSystemSetting.AWS_RETRY_MODE.environmentVariable(), testData.envVarValue); } @@ -92,10 +93,12 @@ public void differentCombinationOfConfigs_shouldResolveCorrectly() { System.setProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), diskLocationForFile); } - if (testData.expected == null) { - assertThatThrownBy(RetryMode::defaultRetryMode).isInstanceOf(RuntimeException.class); + Callable result = RetryMode.resolver().defaultRetryMode(testData.defaultMode)::resolve; + if (testData.expected instanceof Class) { + Class expectedClassType = (Class) testData.expected; + assertThatThrownBy(result::call).isInstanceOf(expectedClassType); } else { - assertThat(RetryMode.defaultRetryMode()).isEqualTo(testData.expected); + assertThat(result.call()).isEqualTo(testData.expected); } } @@ -108,12 +111,14 @@ private static class TestData { private final String envVarValue; private final String systemProperty; private final String configFile; - private final RetryMode expected; + private final RetryMode defaultMode; + private final Object expected; - TestData(String 
systemProperty, String envVarValue, String configFile, RetryMode expected) { + TestData(String systemProperty, String envVarValue, String configFile, RetryMode defaultMode, Object expected) { this.envVarValue = envVarValue; this.systemProperty = systemProperty; this.configFile = configFile; + this.defaultMode = defaultMode; this.expected = expected; } } diff --git a/docs/design/NamingConventions.md b/docs/design/NamingConventions.md index f03fceca058b..d33b3fa51f07 100644 --- a/docs/design/NamingConventions.md +++ b/docs/design/NamingConventions.md @@ -4,14 +4,34 @@ This page describes the naming conventions, nouns and common terms -- Abbreviations must follow the same conventions as any other word (eg. use `DynamoDbClient`, not `DynamoDBClient`) +### Class Naming -- Use Singular Enum Name - - For enum classes or "pseudo-enums" classes(classes with public static fields), we should use singular name. (eg. use `SdkSystemSetting`, not `SdkSystemSettings`) +#### General Rules +* Prefer singular class names: `SdkSystemSetting`, not `SdkSystemSettings`. +* Treat acronyms as a single word: `DynamoDbClient`, not `DynamoDBClient`. -- Use of `Provider`, `Supplier` and `Factory` in the class name. - - For general supplier classes (loading/creating resources of the same kind), prefer `Provide` unless the class extends from Java `Supplier` class. (eg. `AwsCredentialsProvider`) - - For factories classes (creating resources of same or different kinds), prefer `Factory`. (eg. `AwsJsonProtocolFactory`) - +#### Classes that instantiate other classes + +* If the class's primary purpose is to return instances of another class: + * If the "get" method has no parameters: + * If the class implements `Supplier`: `{Noun}Supplier` (e.g. `CachedSupplier`) + * If the class does not implement `Supplier`: `{Noun}Provider` (e.g. `AwsCredentialsProvider`) + * If the "get" method has parameters: `{Noun}Factory` (e.g. 
`AwsJsonProtocolFactory`) + +#### Service-specific classes +* If the class makes service calls: + * If the class can be used to invoke *every* data-plane operation: + * If the class is code generated: + * If the class uses sync HTTP: `{ServiceName}Client` (e.g. `DynamoDbClient`) + * If the class uses async HTTP: `{ServiceName}AsyncClient` (e.g. `DynamoDbAsyncClient`) + * If the class is hand-written: + * If the class uses sync HTTP: `{ServiceName}EnhancedClient` (e.g. `DynamoDbEnhancedClient`) + * If the class uses async HTTP: `{ServiceName}EnhancedAsyncClient` (e.g. `DynamoDbEnhancedAsyncClient`) + * If the class can be used to invoke only *some* data-plane operations: + * If the class uses sync HTTP: `{ServiceName}{Noun}Manager` (e.g. `SqsBatchManager`) + * If the class uses async HTTP: `{ServiceName}Async{Noun}Manager` (e.g. `SqsAsyncBatchManager`) + * Note: If the only implementation uses async HTTP, `Async` may be excluded. (e.g. `S3TransferManager`) +* If the class does not make service calls: + * If the class creates presigned URLs: `{ServiceName}Presigner` (e.g. `S3Presigner`) + * If the class is a collection of various unrelated "helper" methods: `{ServiceName}Utilities` (e.g. `S3Utilities`) \ No newline at end of file diff --git a/docs/design/core/batch-manager/DecisionLog.md b/docs/design/core/batch-manager/DecisionLog.md new file mode 100644 index 000000000000..92073c536ea8 --- /dev/null +++ b/docs/design/core/batch-manager/DecisionLog.md @@ -0,0 +1,73 @@ +# Decision Log for SDK V2 Batch Utility + +## Log Entry Template + +**Source**: (Meeting/aside/pair programming discussion/daily standup) to (discuss/implement) X + +**Attendees**: Anna-Karin, Irene, Dongie, Matt, Vinod, John, Zoe, Debora, Bennett, Michael + +**Closed Decisions:** + +1. Question? Decision. Justification + +**Open Decisions:** + +1. (Old/Reopened/new) Question? 
+ +## 7/15/2021 + +**Source:** Quick meeting to discuss naming for batch utility + +**Attendees:** Anna-Karin, Dongie, Matt, Vinod, John, Zoe, Debora, Bennett, Michael + +**Closed Decisions:** + +1. What should we name the batching utility to make it more discoverable and easy to understand? We will name it {Service}BatchManager. Since the batching utility operates on a low-level service client, it is not much of a utility. At the same time, it does not replace methods implemented by the low-level client so it is not an enhanced client either. It operates similarly to the TransferManager so we will name it BatchManager. + +**Open Decisions:** + +None + +## 7/13/2021 + +**Source:** Follow-up meeting to discuss open decisions from last meeting and to address any new issues. + +**Attendees:** Dongie, Matt, Zoe, Debora, Bennett, Michael + +**Closed Decisions:** + +1. Do we honor the `RequestOverrideConfiguration` fields in the `SendMessageRequest`? Yes, we will batch them separately using the `AwsRequestOverrideConfiguration`’s included `equals/hashCode` methods. +2. How do we map between the `SendMessageRequest` and the `SendMessageRequestBatchEntry`? We will generate the mappings between the two types using name matching and some type of customization configuration, like a `batch.json`. +3. What if fields exist in `SendMessageRequest` that aren’t in the `SendMessageRequestBatchEntry` or vice-versa? Options discussed: *(a)* Take `SendMessageRequest`, throw exception if there’s a field specified but there isn’t an equivalent field specified in the SendMessageRequestBatchEntry (by name), *(b)* same as “a” but also add a method for `(QueueUrl, SendMessageRequestBatchEntry)`, *(c)* Take `(QueueUrl, SendMessageRequestBatchEntry)` exclusively, *(d)* Take `SendMessageRequest`, fail the build if mismatch, and investigate (or whitelist the field for exclusion). Decision: For now, we will go with (a), but be prepared to do (b) if the fields diverge. +4. 
Should we allow the customer to override the mapping from `SendMessageRequest` to `SendMessageRequestBatchEntry` if they want to change the opinionated behavior or get more visibility into the transformation process? Not yet, but we may need to do it in the future if the fields diverge. +5. Should the batch utility only use a `sendMessage()` method that only sends one request for a 1:1 correlation between request and response? Yes, for now, but we will be open to customer feedback in this area. +6. Should we accept streams or iterators to `sendMessages()`? No, for now, but we will be open to customer feedback in this area. + +**Open Decisions:** + +1. (Old) What should we name the batching utility to make it more discoverable and easy to understand? + +## 6/29/21 + +**Source:** Meeting to discuss initial design for Batch Utility: https://github.com/aws/aws-sdk-java-v2/pull/2563 + +**Attendees:** Dongie, Matt, Vinod, John, Zoe, Debora, Bennett, Michael + +**Closed Decisions:** + +1. Should we implement batching methods directly on the client or in a separate utility as proposed in the design document? Separate utility. This would be consistent with other APIs (like Waiters). If we want to change this, we would have to change all APIs and we might as well do it all together. +2. Should we use a `batch.json` or `batcher.json` to store default values and service-specific batch methods? Yes. We do not want to place the burden of providing these values on the customer, and this will be consistent with other APIs like Waiters. We will consider implementing this across the SDK. +3. Should we create a wrapper class for `CompletableFuture>`? Yes. We should create a wrapper class like `SQSBatchResponse` in order to avoid nesting generics and make it easier to understand for Customers. +4. Should we include a manual flush and the option to flush a specific buffer? Yes. This will have parity with v1 and give Customers additional functionality that they may need. +5. 
Should batch retries be handled by the client? Yes. Retries should be handled by the client as is done throughout the SDK. These retries could possibly be batched as well. +6. Should we have separate sync and async interfaces if they look so similar? Yes. We expect the interfaces to diverge as more features are added. The builders are also different. +7. Should async client handle throttling exceptions by just sending requests one after another? No. This would + defeat the purpose of an async client. Instead, the number of requests sent will only be limited by the maximum number of connections allowed by the client. Additionally, the low level clients already provide throttling support. + +**Open Decisions:** + +1. (New) Should the batch utility only use a `sendMessage()` method that only sends one request for a 1:1 correlation between request and response? + 1. If not, how will Customers using `sendMessages()` correlate the request message with the response messages? +2. (New) Should we accept streams or iterators to `sendMessages()`? + 1. Note: A decision on this runs counter to open decision #1. i.e. If we decide on a 1:1 correlation between request and response, we will not accept streams or iterators and vice versa. +3. (New) What should we name the batching utility to make it more discoverable and easy to understand? diff --git a/docs/design/core/batch-manager/Design.md b/docs/design/core/batch-manager/Design.md new file mode 100644 index 000000000000..67f09159c8d6 --- /dev/null +++ b/docs/design/core/batch-manager/Design.md @@ -0,0 +1,334 @@ +**Design:** New Feature, **Status:** [In Development](../../../README.md) + +# Design Document (Automatic Request Batching) + +## Introduction + +* * * +Some customers have described a need for batch write operations across multiple AWS services but the lack of these features either serve as blockers to adoption of the v2 SDK or limit SDK usability for customers. 
Specifically, this feature was implemented in v1 for the SQS service in the form of the `AmazonSQSBufferedAsyncClient` but equivalent functionality has not been ported over to v2. + +However, since batch write operations are already included in many AWS services, a general automatic batching solution could not only be implemented in SQS but in any service that might benefit from it. On top of features included in v1, additional simplifications and abstractions can also be included in the batch manager to simplify how customers interact with batching throughout the SDK. Therefore the batching manager hopes to benefit customers by reducing cost, improving performance, and/or simplifying implementation. + +This document proposes how this general approach should be implemented in the Java SDK v2. + + +## Design Review + +* * * +Look at decision log here: https://github.com/aws/aws-sdk-java-v2/blob/master/docs/design/core/batch-utilities/DecisionLog.md + +The Java SDK team has decided to implement a separate batch manager for the time being. Further discussion is required surrounding separate utilities vs implementing directly on the client. + +## Overview + +* * * +The batch manager proposed in this document will work similarly to v1’s `AmazonSQSBufferedAsyncClient`. Calls made through the manager will first be buffered before being sent as a batch request to the respective service. Additional functionality will also be implemented in v2, such as the ability to automatically batch an array of items by the manager. + +Client-side buffering will be implemented generically and allows up to the maximum requests for the respective service (ex. max 10 requests for SQS). Doing so will decrease the cost of using these AWS services by reducing the number of sent requests. + +## Proposed APIs + +* * * +The v2 SDK will support a batch manager for both sync and async clients that can leverage batch calls. 
+ +### Instantiation + +**Option 1: Instantiating from an existing client** + +``` +// Sync Batch Manager +SqsClient sqs = SqsClient.create(); +SqsBatchManager sqsBatch = sqs.batchManager(); + +// Async Batch Manager +SqsAsyncClient sqsAsync = SqsAsyncClient.create(); +SqsAsyncBatchManager sqsAsyncBatch = sqsAsync.batchManager(); +``` + +**Option 2: Instantiating from batch manager builder** + +``` +// Sync Batch Manager +SqsBatchManager sqsBatch = SqsBatchManager.builder() + .client(client) + .overrideConfiguration(newConfig) + .build(); + +// Async Batch Manager +SqsAsyncBatchManager sqsBatch = SqsAsyncBatchManager.builder() + .client(asyncClient) + .overrideConfiguration(newConfig) + .build(); +``` + +### General Usage Examples: + +Note: Focusing on automatic batching and manual flushing for the scope of the internship. + +``` +// 1. Automatic Batching +SendMessageRequest request1 = SendMessageRequest.builder() + .messageBody("1") + .build(); +SendMessageRequest request2 = SendMessageRequest.builder() + .messageBody("2") + .build(); + +// Sync +SqsClient sqs = SqsClient.create(); +SqsBatchManager sqsBatch = sqs.batchManager(); +CompletableFuture response1 = sqsBatch.sendMessage(request1); +CompletableFuture response2 = sqsBatch.sendMessage(request2); + +// Async +CompletableFuture response1 = sqsBatch.sendMessage(request1); +CompletableFuture response2 = sqsBatch.sendMessage(request2); + +// 2. Manual Flushing +sqsBatch.flush(); +``` + + + +### `{Service}BatchManager` and `{Service}AsyncBatchManager` + +For each service that can leverage batch features, two classes will be created: A {Service}BatchManager and {Service}AsyncBatchManager (ex. SqsBatchManager and SqsAsyncBatchManager for SQS). This follows the naming convention established in v2 like with {Service}Client and {Service}Manager. + +**Sync:** + +``` +/** + * Batch Manager class that implements batching features for a sync client. 
+ */ + @SdkPublicApi + @Generated("software.amazon.awssdk:codegen") + public interface SqsBatchManager { + + /** + * Buffers outgoing requests on the client and sends them as batch requests to the service. + * Requests are batched together according to a batchKey and are sent periodically to the + * service as determined by {@link #maxBatchOpenInMs}. If the number of requests for a + * batchKey reaches or exceeds {@link #maxBatchItems}, then the requests are immediately + * flushed and the timeout on the periodic flush is reset. + * By default, messages are batched according to a service's maximum size for a batch request. + * These settings can be customized via the configuration. + * + * @param request the outgoing request. + * @return a CompletableFuture of the corresponding response. + */ + CompletableFuture sendMessage(SendMessageRequest message); + + /** + * Manually flush the buffer for sendMessage requests. Completes when requests + * are sent. An exception is returned otherwise. + */ + CompletableFuture flush(); + + // Other Batch Manager methods omitted + // ... + + interface Builder { + + Builder client (SqsClient client); + + /** + * Method to override the default Batch Manager configuration. + * + * @param overrideConfig The provided overriding configuration. + * @return a reference to this object so that method calls can be chained. + */ + Builder overrideConfiguration(BatchOverrideConfiguration overrideConfig); + + /** + * Convenient method to override the default Batch Manager configuration + * without needing to create an instance manually. + * + * @param overrideConfig The consumer that provides the + overriding configuration. + * @return a reference to this object so that method calls can be chained. + */ + default Builder overrideConfiguration( + Consumer overrideConfig); + + SqsBatchManager build(); + + } + } +``` + +**Async:** + +``` +/** + * Batch Manager class that implements batching features for an async client. 
+ */ + @SdkPublicApi + @Generated("software.amazon.awssdk:codegen") + public interface SqsAsyncBatchManager { + + /** + * Buffers outgoing requests on the client and sends them as batch requests to the service. + * Requests are batched together according to a batchKey and are sent periodically to the + * service as determined by {@link #maxBatchOpenInMs}. If the number of requests for a + * batchKey reaches or exceeds {@link #maxBatchItems}, then the requests are immediately + * flushed and the timeout on the periodic flush is reset. + * By default, messages are batched according to a service's maximum size for a batch request. + * These settings can be customized via the configuration. + * + * @param request the outgoing request. + * @return a CompletableFuture of the corresponding response. + */ + CompletableFuture sendMessage(SendMessageRequest message); + + /** + * Manually flush the buffer for sendMessage requests. Completes when requests + * are sent. An exception is returned otherwise. + */ + CompletableFuture flush(); + + // Other Batch Manager methods omitted + // ... + + interface Builder { + + Builder client (SqsAsyncClient client); + + /** + * Method to override the default Batch Manager configuration. + * + * @param overrideConfig The provided overriding configuration. + * @return a reference to this object so that method calls can be chained. + */ + Builder overrideConfiguration(BatchOverrideConfiguration overrideConfig); + + /** + * Convenient method to override the default Batch Manager configuration + * without needing to create an instance manually. + * + * @param overrideConfig The consumer that provides the + overriding configuration. + * @return a reference to this object so that method calls can be chained. 
+ */ + default Builder overrideConfiguration( + Consumer overrideConfig); + + SqsAsyncBatchManager build(); + + } + } + +``` + + + +### `BatchOverrideConfiguration` + +``` +/** + * Configuration class to specify how the Batch Manager will implement its + * batching features. + */ +public final class BatchOverrideConfiguration { + + private final int maxBatchItems; + + private final long maxBatchSizeInBytes; + + private final Duration maxBatchOpenInMs; + + // More fields and methods omitted + // Focus on including configurable fields from v1 +} +``` + +* * * + +## FAQ + +### **Which Services will we generate a Batch Manager?** + +Services that already support batch requests (ex. SQS with sendMessageBatch, Kinesis with putRecords) in order to reduce cost for customers should be supported with a batch manager. + +Note: In this document, we focus on implementing a batch manager for SQS to ensure the functionality of v1’s `AmazonSQSBufferedAsyncClient` is carried over to v2. Therefore the code snippets used mainly focus on methods and types supported by the SQS client. + +### **Why don’t we just implement batching features directly on the low level client?** + +There are three options we discussed in implementing batching features: + +1. Create batching features directly on the low level client +2. Create a separate high level library +3. 
Create a separate batch manager class + +Using these three options would look like: + +``` +SqsAsyncClient sqsAsync = SqsAsyncClient.builder().build(); + +// Option 1 +sqsAsync.automaticSendMessageBatch(message1); +sqsAsync.automaticSendMessageBatch(message1); + +// Option 2 + SqsAsyncBatchManager batchManager = SqsAsyncBatchManager + .builder() + .client(sqsAsync) + .build() +batchManager.sendMessage(message1); +batchManager.sendMessage(message2); + +// Option 3 +SqsAsyncBatchManager batchManager = SqsAsyncBatchManager.batchManager() +batchManager.sendMessage(message1); +batchManager.sendMessage(message2); +``` + + +**Option 1 Pros:** + +1. Automatic batching features are slightly more discoverable. + +**Option 2 Pros:** + +1. Hand written library can be more user friendly than generated utility methods. +2. Works very similarly to the v1 `AmazonSQSBufferedAsyncClient`, so migration from v1 to v2 should require minimal changes. + +**Option 3 Pros:** + +1. All batch related features for a service would be self-contained in the client’s respective utility class. +2. Works very similarly to the v1 `AmazonSQSBufferedAsyncClient`, so migration from v1 to v2 should require minimal changes. +3. Consistent with existing utilities such as the Waiters utility class. +4. Easily configurable and scalable to incorporate many services. + +**Decision:** Option 3 will be used since it closely follows the style used throughout v2 (especially similar to how the waiters abstraction is used). Furthermore, it provides the most flexibility to scale across multiple services without becoming too complicated to use. + +Look at [decision log](./DecisionLog.md) for reasoning made on 6/29/2021. + +### Why do we only support sending one message at a time instead of a list of messages or streams? 
+ +Supporting a singular sendMessage method makes it easier and simpler for customers to correlate request messages with the respective responses than an implementation that receives streams or lists of messages (ex. sending a SendMessageRequest in SQS returns a SendMessageResponse as opposed to a batch response wrapper class). + +Sending multiple messages or a stream of messages can be as simple as looping through each of the messages and calling the sendMessage method. Therefore, if needed, adding support for sending streams or a list of messages can easily be done as long as the sendMessage method is supported. + +### **Why support Sync and Async?** + +Supporting sync and async clients not only ensures that the APIs of both clients do not diverge, but would also have parity with the buffered client in v1. Furthermore, this support is just a matter of using the respective sync and async clients’ methods to make the requests and should both be simple for customers to understand, and for the SDK team to implement. + + +### **Why does `sendMessage` return a CompletableFuture for both the sync and async client?** + +The sendMessage method automatically buffers each sendMessage request until the buffer is full or a timeout occurs. Therefore, the sync client’s sendMessage would block until the entire batchRequest is sent and received, which could take as long as the timeout specified. To reduce blocking for this extended period of time, the sendMessage returns a CompletableFuture in both the sync and async client which completes when the underlying batchRequest is sent and a response is received. + +Therefore, as mentioned above, the distinction between the sync and async client lies in the use of the respective clients’ methods (ex. the sync batch manager leverages the sync client’s sendMessageBatch under the hood while the async batch manager uses the async client’s sendMessageBatch). 
+ + +## References + +* * * +Github feature requests for specific services: + +* [SQS](https://github.com/aws/aws-sdk-java-v2/issues/165) +* [Kinesis](https://github.com/aws/aws-sdk-java/issues/1162) +* [Kinesis Firehose](https://github.com/aws/aws-sdk-java/issues/1343) +* [CloudWatch](https://github.com/aws/aws-sdk-java/issues/1109) +* [S3 batch style deletions](https://github.com/aws/aws-sdk-java/issues/1307) + diff --git a/docs/design/core/batch-utilities/DecisionLog.md b/docs/design/core/batch-utilities/DecisionLog.md deleted file mode 100644 index 774f05ae61d0..000000000000 --- a/docs/design/core/batch-utilities/DecisionLog.md +++ /dev/null @@ -1,42 +0,0 @@ -# Decision Log for SDK V2 Batch Utility - -## Log Entry Template - -* * * -**Source**: (Meeting/aside/pair programming discussion/daily standup) to (discuss/implement) X - -**Attendees**: Anna-Karin, Irene, Dongie, Matt, Vinod, John, Zoe, Debora, Bennett, Michael - -**Closed Decisions:** - -1. Question? Decision. Justification - -**Open Decisions:** - -1. (Old/Reopened/new) Question? - -## 6/29/21 - -* * * -**Source:** Meeting to discuss initial design for Batch Utility: https://github.com/aws/aws-sdk-java-v2/pull/2563 - -**Attendees:** Dongie, Matt, Vinod, John, Zoe, Debora, Bennett, Michael - -**Closed Decisions:** - -1. Should we implement batching methods directly on the client or in a separate utility as proposed in the design document? Separate utility. This would be consistent with other APIs (like Waiters). If we want to change this, we would have to change all APIs and we might as well do it all together. -2. Should we use a `batch.json` or `batcher.json` to store default values and service-specific batch methods? Yes. We do not want to place the burden of providing these values on the customer, and this will be consistent with other APIs like Waiters. We will consider implementing this across the SDK. -3. Should we create a wrapper class for `CompletableFuture>`? Yes. 
We should create a wrapper class like `SQSBatchResponse` in order to avoid nesting generics and make it easier to understand for Customers. -4. Should we include a manual flush and the option to flush a specific buffer? Yes. This will have parity with v1 and give Customers additional functionality that they may need. -5. Should batch retries be handled by the client? Yes. Retries should be handled by the client as is done throughout the SDK. These retries could possibly be batched as well. -6. Should we have separate sync and async interfaces if they look so similar? Yes. We expect the interfaces to diverge as more features are added. The builders are also different. -7. Should async client handle throttling exceptions by just sending requests one after another? No. This would defeat the purpose of an async client. Instead, the number of requests sent will only be limited by the maximum number of connections allowed by the client. Additionally, the low level clients already provide throttling support. - -**Open Decisions:** - -1. (New) Should the batch utility only use a `sendMessage()` method that only sends one request for a 1:1 correlation between request and response? - 1. If not, how will Customers using `sendMessages()` correlate the request message with the response messages? -2. (New) Should we accept streams or iterators to `sendMessages()`? - 1. Note: A decision on this runs counter to open decision #1. i.e. If we decide on a 1:1 correlation between request and response, we will not accept streams or iterators and vice versa. -3. (New) What should we name the batching utility to make it more discoverable and easy to understand? 
- diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index 23ec17e9e109..00a30de12b6d 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 382154c81262..3c930ea6ed86 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT apache-client diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 4045346742b7..60c6b85f40f2 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index 6521f11cc010..bdee481cbd8e 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/http-clients/pom.xml b/http-clients/pom.xml index de769a390264..f419562d4e00 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index 3a0d20a6244d..d42aedc048b6 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index 
afbe787d04fd..555b170e7c18 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index f4b97172eb32..9626fe0c02d5 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index 12af2a7dec8a..f8a7d441da5a 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -68,6 +68,7 @@ codegen-lite codegen-lite-maven-plugin archetypes + third-party test/http-client-tests test/protocol-tests test/protocol-tests-core @@ -80,18 +81,18 @@ test/stability-tests test/sdk-native-image-test test/s3-benchmarks + test/auth-sts-testing https://github.com/aws/aws-sdk-java-v2.git ${project.version} - 2.16.100 + 2.17.15 2.12.3 2.12.3 2.12.3 1.0.1 - 1.2.0 3.12.0 2.18.0 1.7.30 @@ -518,14 +519,9 @@ true *.internal.* - software.amazon.awssdk.core.client.handler.ClientExecutionParams - software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute - software.amazon.awssdk.awscore.eventstream.* - software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory - software.amazon.awssdk.services.macie2.model.* - - software.amazon.awssdk.services.devopsguru.* + + software.amazon.awssdk.services.s3.checksums.ChecksumValidatingInputStream diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 337263ec47ac..cac0ec068fd3 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../pom.xml 
release-scripts diff --git a/scripts/doc_crosslinks/generate_cross_link_data.py b/scripts/doc_crosslinks/generate_cross_link_data.py index 6a3d56453669..cf5ce388f93e 100644 --- a/scripts/doc_crosslinks/generate_cross_link_data.py +++ b/scripts/doc_crosslinks/generate_cross_link_data.py @@ -4,7 +4,8 @@ import codecs import json import re -# import requests +import pathlib +from pathlib import Path from os import listdir from os.path import isdir, exists, join from re import split @@ -14,22 +15,17 @@ def generateDocsMap(apiDefinitionsPath, apiDefinitionsRelativeFilePath): - filesInDir = [f for f in listdir(apiDefinitionsPath) if isdir(join(apiDefinitionsPath, f))] - for file in filesInDir : - serviceJsonFileName = join(apiDefinitionsPath, join(file, apiDefinitionsRelativeFilePath)) - - if(exists(serviceJsonFileName)) : - with codecs.open(serviceJsonFileName, 'rb', 'utf-8') as api_definition: - api_content = json.loads(api_definition.read()) - if "uid" in api_content["metadata"].keys(): - sdks[api_content["metadata"]["uid"]] = file - clientClass[api_content["metadata"]["uid"]] = getClientClassNameFromMetadata(api_content["metadata"]) - -# # Below code can be used for debugging failing clients -# str = "https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/services/"+ file +"/"+getClientClassNameFromMetadata(api_content["metadata"])+".html"+"#validateTemplate--" -# ret = requests.head(str) -# if( ret.status_code != 200 ): -# print(str) + rootPath = pathlib.Path(r'./services') + for serviceModelPaths in rootPath.rglob('service-2.json'): + tokenizePath = str(Path(serviceModelPaths).parent).split("/") + getServiceName = tokenizePath[len(tokenizePath)-1] + if (getServiceName == "codegen-resources"): + getServiceName = str(serviceModelPaths).split("services/")[1].split("/src/main/resources")[0] + with codecs.open(serviceModelPaths, 'rb', 'utf-8') as apiDefinition: + apiContent = json.loads(apiDefinition.read()) + if "uid" in apiContent["metadata"].keys(): + 
sdks[apiContent["metadata"]["uid"]] = getServiceName + clientClass[apiContent["metadata"]["uid"]] = getClientClassNameFromMetadata(apiContent["metadata"]) return sdks @@ -110,11 +106,10 @@ def Main(): argMap = {} argMap[ "apiDefinitionsBasePath" ] = args[ "apiDefinitionsBasePath" ] or "./../services/" argMap[ "apiDefinitionsRelativeFilePath" ] = args[ "apiDefinitionsRelativeFilePath" ] or "/src/main/resources/codegen-resources/service-2.json" - argMap[ "templateFilePath" ] = args[ "templateFilePath" ] or "./scripts/crosslink_redirect.html" + argMap[ "templateFilePath" ] = args[ "templateFilePath" ] or "./scripts/doc_crosslinks/crosslink_redirect.html" argMap[ "outputFilePath" ] = args[ "outputFilePath" ] or "./crosslink_redirect.html" insertDocsMapToRedirect(argMap["apiDefinitionsBasePath"], argMap["apiDefinitionsRelativeFilePath"], argMap["templateFilePath"], argMap["outputFilePath"]) print("Generated Cross link at " + argMap["outputFilePath"]) -Main() - \ No newline at end of file +Main() diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index 75206dd2b630..2ef4d44cf405 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/pom.xml b/services-custom/pom.xml index 561340f0d5e9..c2ad9e6fc71e 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-transfer-manager/pom.xml b/services-custom/s3-transfer-manager/pom.xml index 6a9dde7bbf0e..6dd6ed613c40 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index cc51c57b5f38..430f0bd68d78 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 0792f54285be..6373796cc9e1 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acm/src/main/resources/codegen-resources/service-2.json b/services/acm/src/main/resources/codegen-resources/service-2.json index 93adef55ae57..5d964fe9d822 100644 --- a/services/acm/src/main/resources/codegen-resources/service-2.json +++ b/services/acm/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Adds one or more tags to an ACM certificate. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the certificate on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair.

You can apply a tag to just one certificate if you want to identify a specific characteristic of that certificate, or you can apply the same tag to multiple certificates if you want to filter for a common relationship among those certificates. Similarly, you can apply the same tag to multiple resources if you want to specify a relationship among those resources. For example, you can add the same tag to an ACM certificate and an Elastic Load Balancing load balancer to indicate that they are both used by the same website. For more information, see Tagging ACM certificates.

To remove one or more tags, use the RemoveTagsFromCertificate action. To view all of the tags that have been applied to the certificate, use the ListTagsForCertificate action.

" + "documentation":"

Adds one or more tags to an ACM certificate. Tags are labels that you can use to identify and organize your Amazon Web Services resources. Each tag consists of a key and an optional value. You specify the certificate on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair.

You can apply a tag to just one certificate if you want to identify a specific characteristic of that certificate, or you can apply the same tag to multiple certificates if you want to filter for a common relationship among those certificates. Similarly, you can apply the same tag to multiple resources if you want to specify a relationship among those resources. For example, you can add the same tag to an ACM certificate and an Elastic Load Balancing load balancer to indicate that they are both used by the same website. For more information, see Tagging ACM certificates.

To remove one or more tags, use the RemoveTagsFromCertificate action. To view all of the tags that have been applied to the certificate, use the ListTagsForCertificate action.

" }, "DeleteCertificate":{ "name":"DeleteCertificate", @@ -43,7 +43,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Deletes a certificate and its associated private key. If this action succeeds, the certificate no longer appears in the list that can be displayed by calling the ListCertificates action or be retrieved by calling the GetCertificate action. The certificate will not be available for use by AWS services integrated with ACM.

You cannot delete an ACM certificate that is being used by another AWS service. To delete a certificate that is in use, the certificate association must first be removed.

" + "documentation":"

Deletes a certificate and its associated private key. If this action succeeds, the certificate no longer appears in the list that can be displayed by calling the ListCertificates action or be retrieved by calling the GetCertificate action. The certificate will not be available for use by Amazon Web Services services integrated with ACM.

You cannot delete an ACM certificate that is being used by another Amazon Web Services service. To delete a certificate that is in use, the certificate association must first be removed.

" }, "DescribeCertificate":{ "name":"DescribeCertificate", @@ -85,7 +85,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns the account configuration options associated with an AWS account.

" + "documentation":"

Returns the account configuration options associated with an Amazon Web Services account.

" }, "GetCertificate":{ "name":"GetCertificate", @@ -119,7 +119,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Imports a certificate into AWS Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the AWS Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

  • You must enter the private key that matches the certificate you are importing.

  • The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase.

  • The private key must be no larger than 5 KB (5,120 bytes).

  • If the certificate you are importing is not self-signed, you must enter its certificate chain.

  • If a certificate chain is included, the issuer must be the subject of one of the certificates in the chain.

  • The certificate, private key, and certificate chain must be PEM-encoded.

  • The current time must be between the Not Before and Not After certificate fields.

  • The Issuer field must not be empty.

  • The OCSP authority URL, if present, must not exceed 1000 characters.

  • To import a new certificate, omit the CertificateArn argument. Include this argument only when you want to replace a previously imported certificate.

  • When you import a certificate by using the CLI, you must specify the certificate, the certificate chain, and the private key by their file names preceded by fileb://. For example, you can specify a certificate saved in the C:\\temp folder as fileb://C:\\temp\\certificate_to_import.pem. If you are making an HTTP or HTTPS Query request, include these arguments as BLOBs.

  • When you import a certificate by using an SDK, you must specify the certificate, the certificate chain, and the private key files in the manner required by the programming language you're using.

  • The cryptographic algorithm of an imported certificate must match the algorithm of the signing CA. For example, if the signing CA key type is RSA, then the certificate key type must also be RSA.

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" + "documentation":"

Imports a certificate into Amazon Web Services Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the Amazon Web Services Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

  • You must enter the private key that matches the certificate you are importing.

  • The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase.

  • The private key must be no larger than 5 KB (5,120 bytes).

  • If the certificate you are importing is not self-signed, you must enter its certificate chain.

  • If a certificate chain is included, the issuer must be the subject of one of the certificates in the chain.

  • The certificate, private key, and certificate chain must be PEM-encoded.

  • The current time must be between the Not Before and Not After certificate fields.

  • The Issuer field must not be empty.

  • The OCSP authority URL, if present, must not exceed 1000 characters.

  • To import a new certificate, omit the CertificateArn argument. Include this argument only when you want to replace a previously imported certificate.

  • When you import a certificate by using the CLI, you must specify the certificate, the certificate chain, and the private key by their file names preceded by fileb://. For example, you can specify a certificate saved in the C:\\temp folder as fileb://C:\\temp\\certificate_to_import.pem. If you are making an HTTP or HTTPS Query request, include these arguments as BLOBs.

  • When you import a certificate by using an SDK, you must specify the certificate, the certificate chain, and the private key files in the manner required by the programming language you're using.

  • The cryptographic algorithm of an imported certificate must match the algorithm of the signing CA. For example, if the signing CA key type is RSA, then the certificate key type must also be RSA.

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" }, "ListCertificates":{ "name":"ListCertificates", @@ -210,7 +210,7 @@ {"shape":"TagPolicyException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Requests an ACM certificate for use with other AWS services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter.

If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation. ACM issues public certificates after receiving approval from the domain owner.

" + "documentation":"

Requests an ACM certificate for use with other Amazon Web Services services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter.

If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation. ACM issues public certificates after receiving approval from the domain owner.

ACM behavior differs from the RFC 6125 (https://tools.ietf.org/html/rfc6125#appendix-B.2) specification of the certificate validation process. ACM first checks for a subject alternative name, and, if it finds one, ignores the common name (CN)

" }, "ResendValidationEmail":{ "name":"ResendValidationEmail", @@ -304,7 +304,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the certificate. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the certificate. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

" }, "DomainName":{ "shape":"DomainNameString", @@ -372,15 +372,15 @@ }, "InUseBy":{ "shape":"InUseList", - "documentation":"

A list of ARNs for the AWS resources that are using the certificate. A certificate can be used by multiple AWS resources.

" + "documentation":"

A list of ARNs for the Amazon Web Services resources that are using the certificate. A certificate can be used by multiple Amazon Web Services resources.

" }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

The reason the certificate request failed. This value exists only when the certificate status is FAILED. For more information, see Certificate Request Failed in the AWS Certificate Manager User Guide.

" + "documentation":"

The reason the certificate request failed. This value exists only when the certificate status is FAILED. For more information, see Certificate Request Failed in the Amazon Web Services Certificate Manager User Guide.

" }, "Type":{ "shape":"CertificateType", - "documentation":"

The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED. For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the AWS Certificate Manager User Guide.

" + "documentation":"

The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED. For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Amazon Web Services Certificate Manager User Guide.

" }, "RenewalSummary":{ "shape":"RenewalSummary", @@ -594,7 +594,7 @@ "documentation":"

Specifies the number of days prior to certificate expiration when ACM starts generating EventBridge events. ACM sends one event per day per certificate until the certificate expires. By default, accounts receive events starting 45 days before certificate expiration.

" } }, - "documentation":"

Object containing expiration events options associated with an AWS account.

" + "documentation":"

Object containing expiration events options associated with an Amazon Web Services account.

" }, "ExportCertificateRequest":{ "type":"structure", @@ -714,7 +714,7 @@ "members":{ "ExpiryEvents":{ "shape":"ExpiryEventsConfiguration", - "documentation":"

Expiration events configuration options associated with the AWS account.

" + "documentation":"

Expiration events configuration options associated with the Amazon Web Services account.

" } } }, @@ -840,8 +840,9 @@ "KeyAlgorithm":{ "type":"string", "enum":[ - "RSA_2048", "RSA_1024", + "RSA_2048", + "RSA_3072", "RSA_4096", "EC_prime256v1", "EC_secp384r1", @@ -1099,7 +1100,7 @@ }, "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the private certificate authority (CA) that will be used to issue the certificate. If you do not provide an ARN and you are trying to request a private certificate, ACM will attempt to issue a public certificate. For more information about private CAs, see the AWS Certificate Manager Private Certificate Authority (PCA) user guide. The ARN must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) of the private certificate authority (CA) that will be used to issue the certificate. If you do not provide an ARN and you are trying to request a private certificate, ACM will attempt to issue a public certificate. For more information about private CAs, see the Amazon Web Services Certificate Manager Private Certificate Authority (PCA) user guide. The ARN must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "Tags":{ "shape":"TagList", @@ -1151,7 +1152,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The certificate is in use by another AWS service in the caller's account. Remove the association and try again.

", + "documentation":"

The certificate is in use by another Amazon Web Services service in the caller's account. Remove the association and try again.

", "exception":true }, "ResourceNotFoundException":{ @@ -1183,7 +1184,7 @@ "documentation":"

The value of the CNAME record to add to your DNS database. This is supplied by ACM.

" } }, - "documentation":"

Contains a DNS record value that you can use to can use to validate ownership or control of a domain. This is used by the DescribeCertificate action.

" + "documentation":"

Contains a DNS record value that you can use to validate ownership or control of a domain. This is used by the DescribeCertificate action.

" }, "RevocationReason":{ "type":"string", @@ -1287,7 +1288,7 @@ "members":{ "message":{"shape":"ValidationExceptionMessage"} }, - "documentation":"

The supplied input failed to satisfy constraints of an AWS service.

", + "documentation":"

The supplied input failed to satisfy constraints of an Amazon Web Services service.

", "exception":true, "synthetic":true }, @@ -1300,5 +1301,5 @@ ] } }, - "documentation":"AWS Certificate Manager

You can use AWS Certificate Manager (ACM) to manage SSL/TLS certificates for your AWS-based websites and applications. For more information about using ACM, see the AWS Certificate Manager User Guide.

" + "documentation":"Amazon Web Services Certificate Manager

You can use Amazon Web Services Certificate Manager (ACM) to manage SSL/TLS certificates for your Amazon Web Services-based websites and applications. For more information about using ACM, see the Amazon Web Services Certificate Manager User Guide.

" } diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 379c5de26cd1..b6311b4d23f5 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index e3ea900eabcb..dfd7d2ceeb80 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/amp/pom.xml b/services/amp/pom.xml index 3f6737a14812..c09b91af75bc 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 3b4a82677c66..f61694c8ac0e 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index 0c8550d4142c..644e12817bef 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/amplifybackend/src/main/resources/codegen-resources/service-2.json b/services/amplifybackend/src/main/resources/codegen-resources/service-2.json index 6e9526e9b46d..6e567d590989 100644 --- a/services/amplifybackend/src/main/resources/codegen-resources/service-2.json +++ b/services/amplifybackend/src/main/resources/codegen-resources/service-2.json @@ -1027,7 +1027,7 @@ "ApiName": { "shape": "__string", "locationName": "apiName", - 
"documentation": "

The API name used to interact with the data model, configured as a part of the amplify project.

" + "documentation": "

The API name used to interact with the data model, configured as a part of your Amplify project.

" }, "ConflictResolution": { "shape": "BackendAPIConflictResolution", @@ -1092,6 +1092,32 @@ "BackendEnvironmentName" ] }, + "BackendAuthAppleProviderConfig": { + "type": "structure", + "members": { + "ClientId": { + "shape": "__string", + "locationName": "client_id", + "documentation": "

Describes the client_id (also called Services ID) that comes from Apple.

" + }, + "KeyId": { + "shape": "__string", + "locationName": "key_id", + "documentation": "

Describes the key_id that comes from Apple.

" + }, + "PrivateKey": { + "shape": "__string", + "locationName": "private_key", + "documentation": "

Describes the private_key that comes from Apple.

" + }, + "TeamId": { + "shape": "__string", + "locationName": "team_id", + "documentation": "

Describes the team_id that comes from Apple.

" + } + }, + "documentation": "

Describes Apple social federation configurations for allowing your app users to sign in using OAuth.

" + }, "BackendAuthRespObj": { "type": "structure", "members": { @@ -1138,12 +1164,12 @@ "ClientId": { "shape": "__string", "locationName": "client_id", - "documentation": "

Describes the client_id which can be obtained from the third-party social federation provider.

" + "documentation": "

Describes the client_id, which can be obtained from the third-party social federation provider.

" }, "ClientSecret": { "shape": "__string", "locationName": "client_secret", - "documentation": "

Describes the client_secret which can be obtained from third-party social federation providers.

" + "documentation": "

Describes the client_secret, which can be obtained from third-party social federation providers.

" } }, "documentation": "

Describes third-party social federation configurations for allowing your app users to sign in using OAuth.

" @@ -1180,7 +1206,7 @@ "Operation": { "shape": "__string", "locationName": "operation", - "documentation": "

Filters the list of response objects to only include those with the specified operation name.

" + "documentation": "

Filters the list of response objects to include only those with the specified operation name.

" }, "Status": { "shape": "__string", @@ -1472,7 +1498,7 @@ "DeliveryMethod": { "shape": "DeliveryMethod", "locationName": "deliveryMethod", - "documentation": "

Describes which mode to use (either SMS or email) to deliver messages to app users that want to recover their password.

" + "documentation": "

Describes which mode to use (either SMS or email) to deliver messages to app users who want to recover their password.

" }, "EmailSettings": { "shape": "EmailSettings", @@ -1515,7 +1541,7 @@ "members": { "MFAMode": { "shape": "MFAMode", - "documentation": "

Describes whether MFA should be [ON, OFF, OPTIONAL] for authentication in your Amplify project.

" + "documentation": "

Describes whether MFA should be [ON, OFF, or OPTIONAL] for authentication in your Amplify project.

" }, "Settings": { "shape": "Settings", @@ -1523,7 +1549,7 @@ "documentation": "

Describes the configuration settings and methods for your Amplify app users to use MFA.

" } }, - "documentation": "

Describes whether multi-factor authentication policies should be applied for your Amazon Cognito user pool configured as a part of your Amplify project.

", + "documentation": "

Describes whether to apply multi-factor authentication policies for your Amazon Cognito user pool configured as a part of your Amplify project.

", "required": [ "MFAMode" ] @@ -1554,7 +1580,7 @@ "RedirectSignOutURIs": { "shape": "ListOf__string", "locationName": "redirectSignOutURIs", - "documentation": "

Redirect URLs used by OAuth when a user signs out of an Amplify app.

" + "documentation": "

Redirect URLs that OAuth uses when a user signs out of an Amplify app.

" }, "SocialProviderSettings": { "shape": "SocialProviderSettings", @@ -1725,7 +1751,7 @@ "Mfa": { "shape": "CreateBackendAuthMFAConfig", "locationName": "mfa", - "documentation": "

Describes whether multi-factor authentication policies should be applied for your Amazon Cognito user pool configured as a part of your Amplify project.

" + "documentation": "

Describes whether to apply multi-factor authentication policies for your Amazon Cognito user pool configured as a part of your Amplify project.

" }, "OAuth": { "shape": "CreateBackendAuthOAuthConfig", @@ -3484,6 +3510,9 @@ }, "LoginWithAmazon": { "shape": "BackendAuthSocialProviderConfig" + }, + "SignInWithApple": { + "shape": "BackendAuthAppleProviderConfig" } }, "documentation": "

The settings for using the social identity providers for access to your Amplify app.

" @@ -3610,7 +3639,7 @@ "UnauthenticatedLogin": { "shape": "__boolean", "locationName": "unauthenticatedLogin", - "documentation": "

A boolean value which can be set to allow or disallow guest-level authorization into your Amplify app.

" + "documentation": "

A boolean value that can be set to allow or disallow guest-level authorization into your Amplify app.

" } }, "documentation": "

Describes the authorization configuration for the Amazon Cognito identity pool, provisioned as a part of your auth resource in the Amplify project.

" @@ -3651,12 +3680,12 @@ "RedirectSignInURIs": { "shape": "ListOf__string", "locationName": "redirectSignInURIs", - "documentation": "

Redirect URLs used by OAuth when a user signs in to an Amplify app.

" + "documentation": "

Redirect URLs that OAuth uses when a user signs in to an Amplify app.

" }, "RedirectSignOutURIs": { "shape": "ListOf__string", "locationName": "redirectSignOutURIs", - "documentation": "

Redirect URLs used by OAuth when a user signs out of an Amplify app.

" + "documentation": "

Redirect URLs that OAuth uses when a user signs out of an Amplify app.

" }, "SocialProviderSettings": { "shape": "SocialProviderSettings", @@ -3813,7 +3842,7 @@ "Mfa": { "shape": "UpdateBackendAuthMFAConfig", "locationName": "mfa", - "documentation": "

Describes whether multi-factor authentication policies should be applied for your Amazon Cognito user pool configured as a part of your Amplify project.

" + "documentation": "

Describes whether to apply multi-factor authentication policies for your Amazon Cognito user pool configured as a part of your Amplify project.

" }, "OAuth": { "shape": "UpdateBackendAuthOAuthConfig", @@ -3908,7 +3937,7 @@ "Operation": { "shape": "__string", "locationName": "operation", - "documentation": "

Filters the list of response objects to only include those with the specified operation name.

" + "documentation": "

Filters the list of response objects to include only those with the specified operation name.

" }, "Status": { "shape": "__string", diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index 41a0a630a81e..ad5bc2593bec 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index 779b0e111073..2caef89c81b9 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 94ae58ace392..1a94c7cef192 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 902405a7505f..861f7e0d0a3f 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index 05aac49b7038..3641b2c4bf87 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index 374babb67830..b7a48f77a6cb 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 
appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/appintegrations/src/main/resources/codegen-resources/service-2.json b/services/appintegrations/src/main/resources/codegen-resources/service-2.json index a35f0586f70b..ca3d4c29973e 100644 --- a/services/appintegrations/src/main/resources/codegen-resources/service-2.json +++ b/services/appintegrations/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Creates an EventIntegration, given a specified name, description, and a reference to an Amazon Eventbridge bus in your account and a partner event source that will push events to that bus. No objects are created in the your account, only metadata that is persisted on the EventIntegration control plane.

" + "documentation":"

Creates an EventIntegration, given a specified name, description, and a reference to an Amazon EventBridge bus in your account and a partner event source that pushes events to that bus. No objects are created in your account, only metadata that is persisted on the EventIntegration control plane.

" }, "DeleteEventIntegration":{ "name":"DeleteEventIntegration", @@ -45,7 +45,7 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Deletes the specified existing event integration. If the event integration is associated with clients, the request is rejected.

" + "documentation":"

Deletes the specified existing event integration. If the event integration is associated with clients, the request is rejected.

" }, "GetEventIntegration":{ "name":"GetEventIntegration", @@ -62,7 +62,7 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Return information about the event integration.

" + "documentation":"

Return information about the event integration.

" }, "ListEventIntegrationAssociations":{ "name":"ListEventIntegrationAssociations", @@ -79,7 +79,7 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Returns a paginated list of event integration associations in the account.

" + "documentation":"

Returns a paginated list of event integration associations in the account.

" }, "ListEventIntegrations":{ "name":"ListEventIntegrations", @@ -95,7 +95,7 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Returns a paginated list of event integrations in the account.

" + "documentation":"

Returns a paginated list of event integrations in the account.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -111,7 +111,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Lists the tags for the specified resource.

" + "documentation":"

Lists the tags for the specified resource.

" }, "TagResource":{ "name":"TagResource", @@ -127,7 +127,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Adds the specified tags to the specified resource.

" + "documentation":"

Adds the specified tags to the specified resource.

" }, "UntagResource":{ "name":"UntagResource", @@ -143,7 +143,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Removes the specified tags from the specified resource.

" + "documentation":"

Removes the specified tags from the specified resource.

" }, "UpdateEventIntegration":{ "name":"UpdateEventIntegration", @@ -160,7 +160,7 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Updates the description of an event integration.

" + "documentation":"

Updates the description of an event integration.

" } }, "shapes":{ @@ -212,7 +212,7 @@ }, "EventBridgeBus":{ "shape":"EventBridgeBus", - "documentation":"

The Eventbridge bus.

" + "documentation":"

The EventBridge bus.

" }, "ClientToken":{ "shape":"IdempotencyToken", @@ -287,7 +287,7 @@ "documentation":"

The source of the events.

" } }, - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

The event filter.

" + "documentation":"

The event filter.

" }, "EventIntegration":{ "type":"structure", @@ -310,14 +310,14 @@ }, "EventBridgeBus":{ "shape":"EventBridgeBus", - "documentation":"

The Amazon Eventbridge bus for the event integration.

" + "documentation":"

The Amazon EventBridge bus for the event integration.

" }, "Tags":{ "shape":"TagMap", "documentation":"

The tags.

" } }, - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

The event integration.

" + "documentation":"

The event integration.

" }, "EventIntegrationAssociation":{ "type":"structure", @@ -340,14 +340,14 @@ }, "EventBridgeRuleName":{ "shape":"EventBridgeRuleName", - "documentation":"

The name of the Eventbridge rule.

" + "documentation":"

The name of the EventBridge rule.

" }, "ClientAssociationMetadata":{ "shape":"ClientAssociationMetadata", "documentation":"

The metadata associated with the client.

" } }, - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

The event integration association.

" + "documentation":"

The event integration association.

" }, "EventIntegrationAssociationsList":{ "type":"list", @@ -390,7 +390,7 @@ }, "EventBridgeBus":{ "shape":"EventBridgeBus", - "documentation":"

The Eventbridge bus.

" + "documentation":"

The EventBridge bus.

" }, "EventFilter":{ "shape":"EventFilter", @@ -671,5 +671,5 @@ } } }, - "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

The Amazon AppIntegrations service enables you to configure and reuse connections to external applications.

For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations in the Amazon Connect Administrator Guide.

" + "documentation":"

The Amazon AppIntegrations service enables you to configure and reuse connections to external applications.

For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations in the Amazon Connect Administrator Guide.

" } diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 7c2ea2b9f76b..1712d30e0f6f 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index 5cbbefae747d..353af1318aba 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index b226f0f9f389..4af5c0b9d780 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 963b303bcc16..7de929de9162 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index a39a7e88e2e5..b80a7d19b61a 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index 6a6b10aa73c7..27abf31cf30c 100644 --- 
a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 5b8be6f94c26..ac935035f7e5 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index 0e557e2fc8b1..8309650e1c49 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT appsync diff --git a/services/appsync/src/main/resources/codegen-resources/service-2.json b/services/appsync/src/main/resources/codegen-resources/service-2.json index 1584e66d85ed..f0b58d7844f5 100644 --- a/services/appsync/src/main/resources/codegen-resources/service-2.json +++ b/services/appsync/src/main/resources/codegen-resources/service-2.json @@ -716,7 +716,7 @@ "members":{ "authenticationType":{ "shape":"AuthenticationType", - "documentation":"

The authentication type: API key, AWS IAM, OIDC, or Amazon Cognito user pools.

" + "documentation":"

The authentication type: API key, Identity and Access Management, OIDC, or Amazon Cognito user pools.

" }, "openIDConnectConfig":{ "shape":"OpenIDConnectConfig", @@ -725,6 +725,10 @@ "userPoolConfig":{ "shape":"CognitoUserPoolConfig", "documentation":"

The Amazon Cognito user pool configuration.

" + }, + "lambdaAuthorizerConfig":{ + "shape":"LambdaAuthorizerConfig", + "documentation":"

Configuration for AWS Lambda function authorization.

" } }, "documentation":"

Describes an additional authentication provider.

" @@ -820,7 +824,7 @@ "documentation":"

The time after which the API key is deleted. The date is represented as seconds since the epoch, rounded down to the nearest hour.

" } }, - "documentation":"

Describes an API key.

Customers invoke AWS AppSync GraphQL API operations with API keys as an identity mechanism. There are two key versions:

da1: This version was introduced at launch in November 2017. These keys always expire after 7 days. Key expiration is managed by Amazon DynamoDB TTL. The keys ceased to be valid after February 21, 2018 and should not be used after that date.

  • ListApiKeys returns the expiration time in milliseconds.

  • CreateApiKey returns the expiration time in milliseconds.

  • UpdateApiKey is not available for this key version.

  • DeleteApiKey deletes the item from the table.

  • Expiration is stored in Amazon DynamoDB as milliseconds. This results in a bug where keys are not automatically deleted because DynamoDB expects the TTL to be stored in seconds. As a one-time action, we will delete these keys from the table after February 21, 2018.

da2: This version was introduced in February 2018 when AppSync added support to extend key expiration.

  • ListApiKeys returns the expiration time and deletion time in seconds.

  • CreateApiKey returns the expiration time and deletion time in seconds and accepts a user-provided expiration time in seconds.

  • UpdateApiKey returns the expiration time and and deletion time in seconds and accepts a user-provided expiration time in seconds. Expired API keys are kept for 60 days after the expiration time. Key expiration time can be updated while the key is not deleted.

  • DeleteApiKey deletes the item from the table.

  • Expiration is stored in Amazon DynamoDB as seconds. After the expiration time, using the key to authenticate will fail. But the key can be reinstated before deletion.

  • Deletion is stored in Amazon DynamoDB as seconds. The key will be deleted after deletion time.

" + "documentation":"

Describes an API key.

Customers invoke AppSync GraphQL API operations with API keys as an identity mechanism. There are two key versions:

da1: This version was introduced at launch in November 2017. These keys always expire after 7 days. Key expiration is managed by Amazon DynamoDB TTL. The keys ceased to be valid after February 21, 2018 and should not be used after that date.

  • ListApiKeys returns the expiration time in milliseconds.

  • CreateApiKey returns the expiration time in milliseconds.

  • UpdateApiKey is not available for this key version.

  • DeleteApiKey deletes the item from the table.

  • Expiration is stored in Amazon DynamoDB as milliseconds. This results in a bug where keys are not automatically deleted because DynamoDB expects the TTL to be stored in seconds. As a one-time action, we will delete these keys from the table after February 21, 2018.

da2: This version was introduced in February 2018 when AppSync added support to extend key expiration.

  • ListApiKeys returns the expiration time and deletion time in seconds.

  • CreateApiKey returns the expiration time and deletion time in seconds and accepts a user-provided expiration time in seconds.

  • UpdateApiKey returns the expiration time and deletion time in seconds and accepts a user-provided expiration time in seconds. Expired API keys are kept for 60 days after the expiration time. Key expiration time can be updated while the key is not deleted.

  • DeleteApiKey deletes the item from the table.

  • Expiration is stored in Amazon DynamoDB as seconds. After the expiration time, using the key to authenticate will fail. But the key can be reinstated before deletion.

  • Deletion is stored in Amazon DynamoDB as seconds. The key will be deleted after deletion time.

" }, "ApiKeyLimitExceededException":{ "type":"structure", @@ -859,7 +863,8 @@ "API_KEY", "AWS_IAM", "AMAZON_COGNITO_USER_POOLS", - "OPENID_CONNECT" + "OPENID_CONNECT", + "AWS_LAMBDA" ] }, "AuthorizationConfig":{ @@ -872,7 +877,7 @@ }, "awsIamConfig":{ "shape":"AwsIamConfig", - "documentation":"

The AWS IAM settings.

" + "documentation":"

The Identity and Access Management settings.

" } }, "documentation":"

The authorization config in case the HTTP endpoint requires authorization.

" @@ -886,14 +891,14 @@ "members":{ "signingRegion":{ "shape":"String", - "documentation":"

The signing region for AWS IAM authorization.

" + "documentation":"

The signing region for Identity and Access Management authorization.

" }, "signingServiceName":{ "shape":"String", - "documentation":"

The signing service name for AWS IAM authorization.

" + "documentation":"

The signing service name for Identity and Access Management authorization.

" } }, - "documentation":"

The AWS IAM configuration.

" + "documentation":"

The Identity and Access Management configuration.

" }, "BadRequestException":{ "type":"structure", @@ -938,7 +943,7 @@ }, "awsRegion":{ "shape":"String", - "documentation":"

The AWS Region in which the user pool was created.

" + "documentation":"

The Amazon Web Services Region in which the user pool was created.

" }, "appIdClientRegex":{ "shape":"String", @@ -1077,7 +1082,7 @@ }, "serviceRoleArn":{ "shape":"String", - "documentation":"

The AWS IAM service role ARN for the data source. The system assumes this role when accessing the data source.

" + "documentation":"

The Identity and Access Management service role ARN for the data source. The system assumes this role when accessing the data source.

" }, "dynamodbConfig":{ "shape":"DynamodbDataSourceConfig", @@ -1085,7 +1090,7 @@ }, "lambdaConfig":{ "shape":"LambdaDataSourceConfig", - "documentation":"

AWS Lambda settings.

" + "documentation":"

Amazon Web Services Lambda settings.

" }, "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", @@ -1178,7 +1183,7 @@ }, "authenticationType":{ "shape":"AuthenticationType", - "documentation":"

The authentication type: API key, AWS IAM, OIDC, or Amazon Cognito user pools.

" + "documentation":"

The authentication type: API key, Identity and Access Management, OIDC, or Amazon Cognito user pools.

" }, "userPoolConfig":{ "shape":"UserPoolConfig", @@ -1199,6 +1204,10 @@ "xrayEnabled":{ "shape":"Boolean", "documentation":"

A flag indicating whether to enable X-Ray tracing for the GraphqlApi.

" + }, + "lambdaAuthorizerConfig":{ + "shape":"LambdaAuthorizerConfig", + "documentation":"

Configuration for AWS Lambda function authorization.

" } } }, @@ -1324,11 +1333,11 @@ }, "type":{ "shape":"DataSourceType", - "documentation":"

The type of the data source.

  • AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table.

  • AMAZON_ELASTICSEARCH: The data source is an Amazon Elasticsearch Service domain.

  • AWS_LAMBDA: The data source is an AWS Lambda function.

  • NONE: There is no data source. This type is used when you wish to invoke a GraphQL operation without connecting to a data source, such as performing data transformation with resolvers or triggering a subscription to be invoked from a mutation.

  • HTTP: The data source is an HTTP endpoint.

  • RELATIONAL_DATABASE: The data source is a relational database.

" + "documentation":"

The type of the data source.

  • AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table.

  • AMAZON_ELASTICSEARCH: The data source is an Amazon Elasticsearch Service domain.

  • AWS_LAMBDA: The data source is an Amazon Web Services Lambda function.

  • NONE: There is no data source. This type is used when you wish to invoke a GraphQL operation without connecting to a data source, such as performing data transformation with resolvers or triggering a subscription to be invoked from a mutation.

  • HTTP: The data source is an HTTP endpoint.

  • RELATIONAL_DATABASE: The data source is a relational database.

" }, "serviceRoleArn":{ "shape":"String", - "documentation":"

The AWS IAM service role ARN for the data source. The system assumes this role when accessing the data source.

" + "documentation":"

The Identity and Access Management service role ARN for the data source. The system assumes this role when accessing the data source.

" }, "dynamodbConfig":{ "shape":"DynamodbDataSourceConfig", @@ -1336,7 +1345,7 @@ }, "lambdaConfig":{ "shape":"LambdaDataSourceConfig", - "documentation":"

AWS Lambda settings.

" + "documentation":"

Amazon Web Services Lambda settings.

" }, "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", @@ -1579,7 +1588,7 @@ }, "awsRegion":{ "shape":"String", - "documentation":"

The AWS Region.

" + "documentation":"

The Amazon Web Services Region.

" }, "useCallerCredentials":{ "shape":"Boolean", @@ -1609,7 +1618,7 @@ }, "awsRegion":{ "shape":"String", - "documentation":"

The AWS Region.

" + "documentation":"

The Amazon Web Services Region.

" } }, "documentation":"

Describes an Elasticsearch data source configuration.

" @@ -1987,7 +1996,11 @@ }, "wafWebAclArn":{ "shape":"String", - "documentation":"

The ARN of the AWS Web Application Firewall (WAF) ACL associated with this GraphqlApi, if one exists.

" + "documentation":"

The ARN of the WAF ACL associated with this GraphqlApi, if one exists.

" + }, + "lambdaAuthorizerConfig":{ + "shape":"LambdaAuthorizerConfig", + "documentation":"

Configuration for AWS Lambda function authorization.

" } }, "documentation":"

Describes a GraphQL API.

" @@ -2001,7 +2014,7 @@ "members":{ "endpoint":{ "shape":"String", - "documentation":"

The HTTP URL endpoint. You can either specify the domain name or IP, and port combination, and the URL scheme must be HTTP or HTTPS. If the port is not specified, AWS AppSync uses the default port 80 for the HTTP endpoint and port 443 for HTTPS endpoints.

" + "documentation":"

The HTTP URL endpoint. You can either specify the domain name or IP, and port combination, and the URL scheme must be HTTP or HTTPS. If the port is not specified, AppSync uses the default port 80 for the HTTP endpoint and port 443 for HTTPS endpoints.

" }, "authorizationConfig":{ "shape":"AuthorizationConfig", @@ -2015,11 +2028,30 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

An internal AWS AppSync error occurred. Try your request again.

", + "documentation":"

An internal AppSync error occurred. Try your request again.

", "error":{"httpStatusCode":500}, "exception":true, "fault":true }, + "LambdaAuthorizerConfig":{ + "type":"structure", + "required":["authorizerUri"], + "members":{ + "authorizerResultTtlInSeconds":{ + "shape":"TTL", + "documentation":"

The number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a ttlOverride key in its response. A value of 0 disables caching of responses.

" + }, + "authorizerUri":{ + "shape":"String", + "documentation":"

The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a version ARN (.../v3), or an alias ARN.

Note: This Lambda function must have the following resource-based policy assigned to it. When configuring Lambda authorizers in the Console, this is done for you. To do so with the AWS CLI, run the following:

aws lambda add-permission --function-name \"arn:aws:lambda:us-east-2:111122223333:function:my-function\" --statement-id \"appsync\" --principal appsync.amazonaws.com --action lambda:InvokeFunction

" + }, + "identityValidationExpression":{ + "shape":"String", + "documentation":"

A regular expression for validation of tokens before the Lambda function is called.

" + } + }, + "documentation":"

A LambdaAuthorizerConfig holds configuration on how to authorize AppSync API access when using the AWS_LAMBDA authorizer mode. Be aware that an AppSync API may have only one Lambda authorizer configured at a time.

" + }, "LambdaConflictHandlerConfig":{ "type":"structure", "members":{ @@ -2039,7 +2071,7 @@ "documentation":"

The ARN for the Lambda function.

" } }, - "documentation":"

Describes an AWS Lambda data source configuration.

" + "documentation":"

Describes an Amazon Web Services Lambda data source configuration.

" }, "LimitExceededException":{ "type":"structure", @@ -2363,7 +2395,7 @@ }, "cloudWatchLogsRoleArn":{ "shape":"String", - "documentation":"

The service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account.

" + "documentation":"

The service role that AppSync will assume to publish to Amazon CloudWatch logs in your account.

" }, "excludeVerboseContent":{ "shape":"Boolean", @@ -2407,7 +2439,7 @@ }, "clientId":{ "shape":"String", - "documentation":"

The client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time.

" + "documentation":"

The client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so that AppSync can validate against multiple client identifiers at a time.

" }, "iatTTL":{ "shape":"Long", @@ -2448,7 +2480,7 @@ "members":{ "awsRegion":{ "shape":"String", - "documentation":"

AWS Region for RDS HTTP endpoint.

" + "documentation":"

Amazon Web Services Region for RDS HTTP endpoint.

" }, "dbClusterIdentifier":{ "shape":"String", @@ -2464,7 +2496,7 @@ }, "awsSecretStoreArn":{ "shape":"String", - "documentation":"

AWS secret store ARN for database credentials.

" + "documentation":"

Amazon Web Services secret store ARN for database credentials.

" } }, "documentation":"

The Amazon RDS HTTP endpoint configuration.

" @@ -2614,12 +2646,17 @@ }, "documentation":"

Describes a Sync configuration for a resolver.

Contains information on which Conflict Detection as well as Resolution strategy should be performed when the resolver is invoked.

" }, + "TTL":{ + "type":"integer", + "max":3600, + "min":0 + }, "TagKey":{ "type":"string", "documentation":"

The key for the tag.

", "max":128, "min":1, - "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + "pattern":"^(?!aws:)[ a-zA-Z+-=._:/]+$" }, "TagKeyList":{ "type":"list", @@ -2852,7 +2889,7 @@ }, "lambdaConfig":{ "shape":"LambdaDataSourceConfig", - "documentation":"

The new AWS Lambda configuration.

" + "documentation":"

The new Amazon Web Services Lambda configuration.

" }, "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", @@ -2975,6 +3012,10 @@ "xrayEnabled":{ "shape":"Boolean", "documentation":"

A flag indicating whether to enable X-Ray tracing for the GraphqlApi.

" + }, + "lambdaAuthorizerConfig":{ + "shape":"LambdaAuthorizerConfig", + "documentation":"

Configuration for AWS Lambda function authorization.

" } } }, @@ -3105,7 +3146,7 @@ }, "awsRegion":{ "shape":"String", - "documentation":"

The AWS Region in which the user pool was created.

" + "documentation":"

The Amazon Web Services Region in which the user pool was created.

" }, "defaultAction":{ "shape":"DefaultAction", @@ -3119,5 +3160,5 @@ "documentation":"

Describes an Amazon Cognito user pool configuration.

" } }, - "documentation":"

AWS AppSync provides API actions for creating and interacting with data sources using GraphQL from your application.

" + "documentation":"

AppSync provides API actions for creating and interacting with data sources using GraphQL from your application.

" } diff --git a/services/athena/pom.xml b/services/athena/pom.xml index 3319c6713dce..32bcb788d098 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/athena/src/main/resources/codegen-resources/service-2.json b/services/athena/src/main/resources/codegen-resources/service-2.json index a8725926e77f..39d03c9dc006 100644 --- a/services/athena/src/main/resources/codegen-resources/service-2.json +++ b/services/athena/src/main/resources/codegen-resources/service-2.json @@ -52,7 +52,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Creates (registers) a data catalog with the specified name and properties. Catalogs created are visible to all users of the same AWS account.

" + "documentation":"

Creates (registers) a data catalog with the specified name and properties. Catalogs created are visible to all users of the same Amazon Web Services account.

" }, "CreateNamedQuery":{ "name":"CreateNamedQuery", @@ -66,7 +66,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Creates a named query in the specified workgroup. Requires that you have access to the workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Creates a named query in the specified workgroup. Requires that you have access to the workgroup.

For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, "CreatePreparedStatement":{ @@ -123,7 +123,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes the named query if you have access to the workgroup in which the query was saved.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Deletes the named query if you have access to the workgroup in which the query was saved.

For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, "DeletePreparedStatement":{ @@ -283,7 +283,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Lists the data catalogs in the current AWS account.

" + "documentation":"

Lists the data catalogs in the current Amazon Web Services account.

" }, "ListDatabases":{ "name":"ListDatabases", @@ -326,7 +326,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the specified workgroup. If a workgroup is not specified, lists the saved queries for the primary workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" + "documentation":"

Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the specified workgroup. If a workgroup is not specified, lists the saved queries for the primary workgroup.

For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" }, "ListPreparedStatements":{ "name":"ListPreparedStatements", @@ -354,7 +354,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides a list of available query execution IDs for the queries in the specified workgroup. If a workgroup is not specified, returns a list of query execution IDs for the primary workgroup. Requires you to have access to the workgroup in which the queries ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" + "documentation":"

Provides a list of available query execution IDs for the queries in the specified workgroup. If a workgroup is not specified, returns a list of query execution IDs for the primary workgroup. Requires you to have access to the workgroup in which the queries ran.

For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" }, "ListTableMetadata":{ "name":"ListTableMetadata", @@ -413,7 +413,7 @@ {"shape":"InvalidRequestException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Runs the SQL query statements contained in the Query. Requires you to have access to the workgroup in which the query ran. Running queries against an external catalog requires GetDataCatalog permission to the catalog. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Runs the SQL query statements contained in the Query. Requires you to have access to the workgroup in which the query ran. Running queries against an external catalog requires GetDataCatalog permission to the catalog. For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, "StopQueryExecution":{ @@ -428,7 +428,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Stops a query execution. Requires you to have access to the workgroup in which the query ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Stops a query execution. Requires you to have access to the workgroup in which the query ran.

For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, "TagResource":{ @@ -669,11 +669,11 @@ "members":{ "Name":{ "shape":"CatalogNameString", - "documentation":"

The name of the data catalog to create. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + "documentation":"

The name of the data catalog to create. The catalog name must be unique for the Amazon Web Services account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" }, "Type":{ "shape":"DataCatalogType", - "documentation":"

The type of data catalog to create: LAMBDA for a federated catalog or HIVE for an external hive metastore.

Do not use the GLUE type. This refers to the AwsDataCatalog that already exists in your account, of which you can have only one. Specifying the GLUE type will result in an INVALID_INPUT error.

" + "documentation":"

The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for a Glue Data Catalog.

" }, "Description":{ "shape":"DescriptionString", @@ -681,7 +681,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.

  • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

    metadata-function=lambda_arn, sdk-version=version_number

  • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

    • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

      metadata-function=lambda_arn, record-function=lambda_arn

    • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

      function=lambda_arn

" + "documentation":"

Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.

  • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

    metadata-function=lambda_arn, sdk-version=version_number

  • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

    • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

      metadata-function=lambda_arn, record-function=lambda_arn

    • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

      function=lambda_arn

  • The GLUE type takes a catalog ID parameter, which is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs.

    catalog-id=catalog_id

    • The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify.

    • Queries that specify a Glue Data Catalog other than the default AwsDataCatalog must be run on Athena engine version 2.

    • In Regions where Athena engine version 2 is not available, creating new Glue data catalogs results in an INVALID_INPUT error.

" }, "Tags":{ "shape":"TagList", @@ -720,7 +720,7 @@ }, "ClientRequestToken":{ "shape":"IdempotencyToken", - "documentation":"

A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another CreateNamedQuery request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the QueryString, an error is returned.

This token is listed as not required because AWS SDKs (for example the AWS SDK for Java) auto-generate the token for users. If you are not using the AWS SDK or the AWS CLI, you must provide this token or the action will fail.

", + "documentation":"

A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another CreateNamedQuery request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the QueryString, an error is returned.

This token is listed as not required because Amazon Web Services SDKs (for example the Amazon Web Services SDK for Java) auto-generate the token for users. If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action will fail.

", "idempotencyToken":true }, "WorkGroup":{ @@ -779,7 +779,7 @@ }, "Configuration":{ "shape":"WorkGroupConfiguration", - "documentation":"

The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for encrypting query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup, the limit for the amount of bytes scanned (cutoff) per query, if it is specified, and whether workgroup's settings (specified with EnforceWorkGroupConfiguration) in the WorkGroupConfiguration override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for encrypting query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup, the limit for the amount of bytes scanned (cutoff) per query, if it is specified, and whether workgroup's settings (specified with EnforceWorkGroupConfiguration) in the WorkGroupConfiguration override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "Description":{ "shape":"WorkGroupDescriptionString", @@ -805,7 +805,7 @@ "members":{ "Name":{ "shape":"CatalogNameString", - "documentation":"

The name of the data catalog. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + "documentation":"

The name of the data catalog. The catalog name must be unique for the Amazon Web Services account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" }, "Description":{ "shape":"DescriptionString", @@ -813,14 +813,14 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"

The type of data catalog: LAMBDA for a federated catalog or HIVE for an external hive metastore. GLUE refers to the AwsDataCatalog that already exists in your account, of which you can have only one.

" + "documentation":"

The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for a Glue Data Catalog.

" }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

  • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

    metadata-function=lambda_arn, sdk-version=version_number

  • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

    • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

      metadata-function=lambda_arn, record-function=lambda_arn

    • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

      function=lambda_arn

" + "documentation":"

Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

  • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

    metadata-function=lambda_arn, sdk-version=version_number

  • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

    • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

      metadata-function=lambda_arn, record-function=lambda_arn

    • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

      function=lambda_arn

  • The GLUE type takes a catalog ID parameter, which is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs.

    catalog-id=catalog_id

    • The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify.

    • Queries that specify a Glue Data Catalog other than the default AwsDataCatalog must be run on Athena engine version 2.

" } }, - "documentation":"

Contains information about a data catalog in an AWS account.

" + "documentation":"

Contains information about a data catalog in an Amazon Web Services account.

" }, "DataCatalogSummary":{ "type":"structure", @@ -1151,7 +1151,7 @@ "members":{ "UpdateCount":{ "shape":"Long", - "documentation":"

The number of rows inserted with a CREATE TABLE AS SELECT statement.

" + "documentation":"

The number of rows inserted with a CREATE TABLE AS SELECT statement.

" }, "ResultSet":{ "shape":"ResultSet", @@ -1703,7 +1703,7 @@ }, "StatementType":{ "shape":"StatementType", - "documentation":"

The type of query statement that was run. DDL indicates DDL query statements. DML indicates DML (Data Manipulation Language) query statements, such as CREATE TABLE AS SELECT. UTILITY indicates query statements other than DDL and DML, such as SHOW CREATE TABLE, or DESCRIBE <table>.

" + "documentation":"

The type of query statement that was run. DDL indicates DDL query statements. DML indicates DML (Data Manipulation Language) query statements, such as CREATE TABLE AS SELECT. UTILITY indicates query statements other than DDL and DML, such as SHOW CREATE TABLE, or DESCRIBE TABLE.

" }, "ResultConfiguration":{ "shape":"ResultConfiguration", @@ -1737,7 +1737,7 @@ "members":{ "Database":{ "shape":"DatabaseString", - "documentation":"

The name of the database used in the query execution.

" + "documentation":"

The name of the database used in the query execution. The database must exist in the catalog.

" }, "Catalog":{ "shape":"CatalogNameString", @@ -1856,11 +1856,11 @@ "members":{ "OutputLocation":{ "shape":"String", - "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Query Results If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Query Results If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "RemoveOutputLocation":{ "shape":"BoxedBoolean", - "documentation":"

If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation in ResultConfigurationUpdates (the client-side setting), the OutputLocation in the workgroup's ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

" + "documentation":"

If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation in ResultConfigurationUpdates (the client-side setting), the OutputLocation in the workgroup's ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

" }, "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", @@ -1868,7 +1868,7 @@ }, "RemoveEncryptionConfiguration":{ "shape":"BoxedBoolean", - "documentation":"

If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration in ResultConfigurationUpdates (the client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

" + "documentation":"

If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration in ResultConfigurationUpdates (the client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

" } }, "documentation":"

The information about the updates in the query results, such as output location and encryption configuration for the query results.

" @@ -1921,7 +1921,7 @@ }, "ClientRequestToken":{ "shape":"IdempotencyToken", - "documentation":"

A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another StartQueryExecution request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the QueryString, an error is returned.

This token is listed as not required because AWS SDKs (for example the AWS SDK for Java) auto-generate the token for users. If you are not using the AWS SDK or the AWS CLI, you must provide this token or the action will fail.

", + "documentation":"

A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another StartQueryExecution request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the QueryString, an error is returned.

This token is listed as not required because Amazon Web Services SDKs (for example the Amazon Web Services SDK for Java) auto-generate the token for users. If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action will fail.

", "idempotencyToken":true }, "QueryExecutionContext":{ @@ -2176,11 +2176,11 @@ "members":{ "Name":{ "shape":"CatalogNameString", - "documentation":"

The name of the data catalog to update. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + "documentation":"

The name of the data catalog to update. The catalog name must be unique for the Amazon Web Services account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" }, "Type":{ "shape":"DataCatalogType", - "documentation":"

Specifies the type of data catalog to update. Specify LAMBDA for a federated catalog or HIVE for an external hive metastore.

Do not use the GLUE type. This refers to the AwsDataCatalog that already exists in your account, of which you can have only one. Specifying the GLUE type will result in an INVALID_INPUT error.

" + "documentation":"

Specifies the type of data catalog to update. Specify LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for a Glue Data Catalog.

" }, "Description":{ "shape":"DescriptionString", @@ -2269,7 +2269,7 @@ }, "Configuration":{ "shape":"WorkGroupConfiguration", - "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for the workgroup; whether workgroup settings override client-side settings; and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for the workgroup; whether workgroup settings override client-side settings; and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "Description":{ "shape":"WorkGroupDescriptionString", @@ -2280,7 +2280,7 @@ "documentation":"

The date and time the workgroup was created.

" } }, - "documentation":"

A workgroup, which contains a name, description, creation time, state, and other configuration, listed under WorkGroup$Configuration. Each workgroup enables you to isolate queries for you or your group of users from other queries in the same account, to configure the query results location and the encryption configuration (known as workgroup settings), to enable sending query metrics to Amazon CloudWatch, and to establish per-query data usage control limits for all queries in a workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

A workgroup, which contains a name, description, creation time, state, and other configuration, listed under WorkGroup$Configuration. Each workgroup enables you to isolate queries for you or your group of users from other queries in the same account, to configure the query results location and the encryption configuration (known as workgroup settings), to enable sending query metrics to Amazon CloudWatch, and to establish per-query data usage control limits for all queries in a workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "WorkGroupConfiguration":{ "type":"structure", @@ -2310,7 +2310,7 @@ "documentation":"

The engine version that all queries running on the workgroup use. Queries on the AmazonAthenaPreviewFunctionality workgroup run on the preview engine regardless of this setting.

" } }, - "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup and whether workgroup settings override query settings, and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup and whether workgroup settings override query settings, and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "WorkGroupConfigurationUpdates":{ "type":"structure", @@ -2400,5 +2400,5 @@ }, "datumString":{"type":"string"} }, - "documentation":"

Amazon Athena is an interactive query service that lets you use standard SQL to analyze data directly in Amazon S3. You can point Athena at your data in Amazon S3 and run ad-hoc queries and get results in seconds. Athena is serverless, so there is no infrastructure to set up or manage. You pay only for the queries you run. Athena scales automatically—executing queries in parallel—so results are fast, even with large datasets and complex queries. For more information, see What is Amazon Athena in the Amazon Athena User Guide.

If you connect to Athena using the JDBC driver, use version 1.1.0 of the driver or later with the Amazon Athena API. Earlier version drivers do not support the API. For more information and to download the driver, see Accessing Amazon Athena with JDBC.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" + "documentation":"

Amazon Athena is an interactive query service that lets you use standard SQL to analyze data directly in Amazon S3. You can point Athena at your data in Amazon S3 and run ad-hoc queries and get results in seconds. Athena is serverless, so there is no infrastructure to set up or manage. You pay only for the queries you run. Athena scales automatically—executing queries in parallel—so results are fast, even with large datasets and complex queries. For more information, see What is Amazon Athena in the Amazon Athena User Guide.

If you connect to Athena using the JDBC driver, use version 1.1.0 of the driver or later with the Amazon Athena API. Earlier version drivers do not support the API. For more information and to download the driver, see Accessing Amazon Athena with JDBC.

For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" } diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index be32a256587d..12c834a4eab2 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/auditmanager/src/main/resources/codegen-resources/service-2.json b/services/auditmanager/src/main/resources/codegen-resources/service-2.json index 4250b16335f9..aea855fa5096 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/auditmanager/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Associates an evidence folder to the specified assessment report in AWS Audit Manager.

" + "documentation":"

Associates an evidence folder to the specified assessment report in Audit Manager.

" }, "BatchAssociateAssessmentReportEvidence":{ "name":"BatchAssociateAssessmentReportEvidence", @@ -42,7 +42,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Associates a list of evidence to an assessment report in an AWS Audit Manager assessment.

" + "documentation":"

Associates a list of evidence to an assessment report in an Audit Manager assessment.

" }, "BatchCreateDelegationByAssessment":{ "name":"BatchCreateDelegationByAssessment", @@ -58,7 +58,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create a batch of delegations for a specified assessment in AWS Audit Manager.

" + "documentation":"

Create a batch of delegations for a specified assessment in Audit Manager.

" }, "BatchDeleteDelegationByAssessment":{ "name":"BatchDeleteDelegationByAssessment", @@ -74,7 +74,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes the delegations in the specified AWS Audit Manager assessment.

" + "documentation":"

Deletes the delegations in the specified Audit Manager assessment.

" }, "BatchDisassociateAssessmentReportEvidence":{ "name":"BatchDisassociateAssessmentReportEvidence", @@ -90,7 +90,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disassociates a list of evidence from the specified assessment report in AWS Audit Manager.

" + "documentation":"

Disassociates a list of evidence from the specified assessment report in Audit Manager.

" }, "BatchImportEvidenceToAssessmentControl":{ "name":"BatchImportEvidenceToAssessmentControl", @@ -106,7 +106,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Uploads one or more pieces of evidence to the specified control in the assessment in AWS Audit Manager.

" + "documentation":"

Uploads one or more pieces of evidence to the specified control in the assessment in Audit Manager.

" }, "CreateAssessment":{ "name":"CreateAssessment", @@ -122,7 +122,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates an assessment in AWS Audit Manager.

" + "documentation":"

Creates an assessment in Audit Manager.

" }, "CreateAssessmentFramework":{ "name":"CreateAssessmentFramework", @@ -138,7 +138,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a custom framework in AWS Audit Manager.

" + "documentation":"

Creates a custom framework in Audit Manager.

" }, "CreateAssessmentReport":{ "name":"CreateAssessmentReport", @@ -170,7 +170,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a new custom control in AWS Audit Manager.

" + "documentation":"

Creates a new custom control in Audit Manager.

" }, "DeleteAssessment":{ "name":"DeleteAssessment", @@ -186,7 +186,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an assessment in AWS Audit Manager.

" + "documentation":"

Deletes an assessment in Audit Manager.

" }, "DeleteAssessmentFramework":{ "name":"DeleteAssessmentFramework", @@ -202,7 +202,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes a custom framework in AWS Audit Manager.

" + "documentation":"

Deletes a custom framework in Audit Manager.

" }, "DeleteAssessmentReport":{ "name":"DeleteAssessmentReport", @@ -218,7 +218,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes an assessment report from an assessment in AWS Audit Manager.

" + "documentation":"

Deletes an assessment report from an assessment in Audit Manager.

" }, "DeleteControl":{ "name":"DeleteControl", @@ -234,7 +234,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes a custom control in AWS Audit Manager.

" + "documentation":"

Deletes a custom control in Audit Manager.

" }, "DeregisterAccount":{ "name":"DeregisterAccount", @@ -250,7 +250,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deregisters an account in AWS Audit Manager.

" + "documentation":"

Deregisters an account in Audit Manager.

" }, "DeregisterOrganizationAdminAccount":{ "name":"DeregisterOrganizationAdminAccount", @@ -266,7 +266,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deregisters the delegated AWS administrator account from the AWS organization.

" + "documentation":"

Removes the specified member account as a delegated administrator for Audit Manager.

When you remove a delegated administrator from your Audit Manager settings, or when you deregister a delegated administrator from Organizations, you continue to have access to the evidence that you previously collected under that account. However, Audit Manager will stop collecting and attaching evidence to that delegated administrator account moving forward.

" }, "DisassociateAssessmentReportEvidenceFolder":{ "name":"DisassociateAssessmentReportEvidenceFolder", @@ -282,7 +282,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disassociates an evidence folder from the specified assessment report in AWS Audit Manager.

" + "documentation":"

Disassociates an evidence folder from the specified assessment report in Audit Manager.

" }, "GetAccountStatus":{ "name":"GetAccountStatus", @@ -295,7 +295,7 @@ "errors":[ {"shape":"InternalServerException"} ], - "documentation":"

Returns the registration status of an account in AWS Audit Manager.

" + "documentation":"

Returns the registration status of an account in Audit Manager.

" }, "GetAssessment":{ "name":"GetAssessment", @@ -311,7 +311,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns an assessment from AWS Audit Manager.

" + "documentation":"

Returns an assessment from Audit Manager.

" }, "GetAssessmentFramework":{ "name":"GetAssessmentFramework", @@ -327,7 +327,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a framework from AWS Audit Manager.

" + "documentation":"

Returns a framework from Audit Manager.

" }, "GetAssessmentReportUrl":{ "name":"GetAssessmentReportUrl", @@ -343,7 +343,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the URL of a specified assessment report in AWS Audit Manager.

" + "documentation":"

Returns the URL of a specified assessment report in Audit Manager.

" }, "GetChangeLogs":{ "name":"GetChangeLogs", @@ -359,7 +359,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of changelogs from AWS Audit Manager.

" + "documentation":"

Returns a list of changelogs from Audit Manager.

" }, "GetControl":{ "name":"GetControl", @@ -375,7 +375,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a control from AWS Audit Manager.

" + "documentation":"

Returns a control from Audit Manager.

" }, "GetDelegations":{ "name":"GetDelegations", @@ -406,7 +406,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns evidence from AWS Audit Manager.

" + "documentation":"

Returns evidence from Audit Manager.

" }, "GetEvidenceByEvidenceFolder":{ "name":"GetEvidenceByEvidenceFolder", @@ -422,7 +422,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns all evidence from a specified evidence folder in AWS Audit Manager.

" + "documentation":"

Returns all evidence from a specified evidence folder in Audit Manager.

" }, "GetEvidenceFolder":{ "name":"GetEvidenceFolder", @@ -438,7 +438,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns an evidence folder from the specified assessment in AWS Audit Manager.

" + "documentation":"

Returns an evidence folder from the specified assessment in Audit Manager.

" }, "GetEvidenceFoldersByAssessment":{ "name":"GetEvidenceFoldersByAssessment", @@ -454,7 +454,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns the evidence folders from a specified assessment in AWS Audit Manager.

" + "documentation":"

Returns the evidence folders from a specified assessment in Audit Manager.

" }, "GetEvidenceFoldersByAssessmentControl":{ "name":"GetEvidenceFoldersByAssessmentControl", @@ -470,7 +470,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of evidence folders associated with a specified control of an assessment in AWS Audit Manager.

" + "documentation":"

Returns a list of evidence folders associated with a specified control of an assessment in Audit Manager.

" }, "GetOrganizationAdminAccount":{ "name":"GetOrganizationAdminAccount", @@ -486,7 +486,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the name of the delegated AWS administrator account for the AWS organization.

" + "documentation":"

Returns the name of the delegated Amazon Web Services administrator account for the organization.

" }, "GetServicesInScope":{ "name":"GetServicesInScope", @@ -501,7 +501,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of the in-scope AWS services for the specified assessment.

" + "documentation":"

Returns a list of the in-scope Amazon Web Services services for the specified assessment.

" }, "GetSettings":{ "name":"GetSettings", @@ -515,7 +515,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns the settings for the specified AWS account.

" + "documentation":"

Returns the settings for the specified account.

" }, "ListAssessmentFrameworks":{ "name":"ListAssessmentFrameworks", @@ -530,7 +530,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of the frameworks available in the AWS Audit Manager framework library.

" + "documentation":"

Returns a list of the frameworks available in the Audit Manager framework library.

" }, "ListAssessmentReports":{ "name":"ListAssessmentReports", @@ -545,7 +545,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of assessment reports created in AWS Audit Manager.

" + "documentation":"

Returns a list of assessment reports created in Audit Manager.

" }, "ListAssessments":{ "name":"ListAssessments", @@ -560,7 +560,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of current and past assessments from AWS Audit Manager.

" + "documentation":"

Returns a list of current and past assessments from Audit Manager.

" }, "ListControls":{ "name":"ListControls", @@ -575,7 +575,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of controls from AWS Audit Manager.

" + "documentation":"

Returns a list of controls from Audit Manager.

" }, "ListKeywordsForDataSource":{ "name":"ListKeywordsForDataSource", @@ -605,7 +605,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of all AWS Audit Manager notifications.

" + "documentation":"

Returns a list of all Audit Manager notifications.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -620,7 +620,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns a list of tags for the specified resource in AWS Audit Manager.

" + "documentation":"

Returns a list of tags for the specified resource in Audit Manager.

" }, "RegisterAccount":{ "name":"RegisterAccount", @@ -636,7 +636,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Enables AWS Audit Manager for the specified AWS account.

" + "documentation":"

Enables Audit Manager for the specified account.

" }, "RegisterOrganizationAdminAccount":{ "name":"RegisterOrganizationAdminAccount", @@ -652,7 +652,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Enables an AWS account within the organization as the delegated administrator for AWS Audit Manager.

" + "documentation":"

Enables an account within the organization as the delegated administrator for Audit Manager.

" }, "TagResource":{ "name":"TagResource", @@ -667,7 +667,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Tags the specified resource in AWS Audit Manager.

" + "documentation":"

Tags the specified resource in Audit Manager.

" }, "UntagResource":{ "name":"UntagResource", @@ -682,7 +682,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes a tag from a resource in AWS Audit Manager.

" + "documentation":"

Removes a tag from a resource in Audit Manager.

" }, "UpdateAssessment":{ "name":"UpdateAssessment", @@ -698,7 +698,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Edits an AWS Audit Manager assessment.

" + "documentation":"

Edits an Audit Manager assessment.

" }, "UpdateAssessmentControl":{ "name":"UpdateAssessmentControl", @@ -714,7 +714,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates a control within an assessment in AWS Audit Manager.

" + "documentation":"

Updates a control within an assessment in Audit Manager.

" }, "UpdateAssessmentControlSetStatus":{ "name":"UpdateAssessmentControlSetStatus", @@ -730,7 +730,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates the status of a control set in an AWS Audit Manager assessment.

" + "documentation":"

Updates the status of a control set in an Audit Manager assessment.

" }, "UpdateAssessmentFramework":{ "name":"UpdateAssessmentFramework", @@ -746,7 +746,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates a custom framework in AWS Audit Manager.

" + "documentation":"

Updates a custom framework in Audit Manager.

" }, "UpdateAssessmentStatus":{ "name":"UpdateAssessmentStatus", @@ -762,7 +762,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates the status of an assessment in AWS Audit Manager.

" + "documentation":"

Updates the status of an assessment in Audit Manager.

" }, "UpdateControl":{ "name":"UpdateControl", @@ -778,7 +778,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates a custom control in AWS Audit Manager.

" + "documentation":"

Updates a custom control in Audit Manager.

" }, "UpdateSettings":{ "name":"UpdateSettings", @@ -793,7 +793,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates AWS Audit Manager settings for the current user account.

" + "documentation":"

Updates Audit Manager settings for the current user account.

" }, "ValidateAssessmentReportIntegrity":{ "name":"ValidateAssessmentReportIntegrity", @@ -809,7 +809,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Validates the integrity of an assessment report in AWS Audit Manager.

" + "documentation":"

Validates the integrity of an assessment report in Audit Manager.

" } }, "shapes":{ @@ -818,18 +818,18 @@ "members":{ "id":{ "shape":"AccountId", - "documentation":"

The identifier for the specified AWS account.

" + "documentation":"

The identifier for the specified account.

" }, "emailAddress":{ "shape":"EmailAddress", - "documentation":"

The email address associated with the specified AWS account.

" + "documentation":"

The email address associated with the specified account.

" }, "name":{ "shape":"AccountName", - "documentation":"

The name of the specified AWS account.

" + "documentation":"

The name of the specified account.

" } }, - "documentation":"

The wrapper of AWS account details, such as account ID, email address, and so on.

" + "documentation":"

The wrapper of account details, such as account ID, email address, and so on.

" }, "AWSAccounts":{ "type":"list", @@ -840,10 +840,10 @@ "members":{ "serviceName":{ "shape":"AWSServiceName", - "documentation":"

The name of the AWS service.

" + "documentation":"

The name of the Amazon Web Service.

" } }, - "documentation":"

An AWS service such as Amazon S3, AWS CloudTrail, and so on.

" + "documentation":"

An Amazon Web Service such as Amazon S3, CloudTrail, and so on.

" }, "AWSServiceName":{ "type":"string", @@ -861,7 +861,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

Your account is not registered with AWS Audit Manager. Check the delegated administrator setup on the AWS Audit Manager settings page, and try again.

", + "documentation":"

Your account is not registered with Audit Manager. Check the delegated administrator setup on the Audit Manager settings page, and try again.

", "error":{"httpStatusCode":403}, "exception":true }, @@ -917,7 +917,7 @@ }, "awsAccount":{ "shape":"AWSAccount", - "documentation":"

The AWS account associated with the assessment.

" + "documentation":"

The account associated with the assessment.

" }, "metadata":{ "shape":"AssessmentMetadata", @@ -932,7 +932,7 @@ "documentation":"

The tags associated with the assessment.

" } }, - "documentation":"

An entity that defines the scope of audit evidence collected by AWS Audit Manager. An AWS Audit Manager assessment is an implementation of an AWS Audit Manager framework.

" + "documentation":"

An entity that defines the scope of audit evidence collected by Audit Manager. An Audit Manager assessment is an implementation of an Audit Manager framework.

" }, "AssessmentControl":{ "type":"structure", @@ -974,7 +974,7 @@ "documentation":"

The amount of evidence in the assessment report.

" } }, - "documentation":"

The control entity that represents a standard or custom control used in an AWS Audit Manager assessment.

" + "documentation":"

The control entity that represents a standard or custom control used in an Audit Manager assessment.

" }, "AssessmentControlSet":{ "type":"structure", @@ -1012,7 +1012,7 @@ "documentation":"

The total number of evidence objects uploaded manually to the control set.

" } }, - "documentation":"

Represents a set of controls in an AWS Audit Manager assessment.

" + "documentation":"

Represents a set of controls in an Audit Manager assessment.

" }, "AssessmentControlSets":{ "type":"list", @@ -1056,7 +1056,7 @@ }, "dataSource":{ "shape":"String", - "documentation":"

The AWS service from which the evidence was collected.

" + "documentation":"

The Amazon Web Service from which the evidence was collected.

" }, "author":{ "shape":"String", @@ -1080,7 +1080,7 @@ }, "evidenceByTypeConfigurationDataCount":{ "shape":"Integer", - "documentation":"

The number of evidence that falls under the configuration data category. This evidence is collected from configuration snapshots of other AWS services such as Amazon EC2, Amazon S3, or IAM.

" + "documentation":"

The number of evidence that falls under the configuration data category. This evidence is collected from configuration snapshots of other Amazon Web Services services such as Amazon EC2, Amazon S3, or IAM.

" }, "evidenceByTypeManualCount":{ "shape":"Integer", @@ -1088,22 +1088,22 @@ }, "evidenceByTypeComplianceCheckCount":{ "shape":"Integer", - "documentation":"

The number of evidence that falls under the compliance check category. This evidence is collected from AWS Config or AWS Security Hub.

" + "documentation":"

The number of evidence that falls under the compliance check category. This evidence is collected from Config or Security Hub.

" }, "evidenceByTypeComplianceCheckIssuesCount":{ "shape":"Integer", - "documentation":"

The total number of issues that were reported directly from AWS Security Hub, AWS Config, or both.

" + "documentation":"

The total number of issues that were reported directly from Security Hub, Config, or both.

" }, "evidenceByTypeUserActivityCount":{ "shape":"Integer", - "documentation":"

The number of evidence that falls under the user activity category. This evidence is collected from AWS CloudTrail logs.

" + "documentation":"

The number of evidence that falls under the user activity category. This evidence is collected from CloudTrail logs.

" }, "evidenceAwsServiceSourceCount":{ "shape":"Integer", - "documentation":"

The total number of AWS resources assessed to generate the evidence.

" + "documentation":"

The total number of Amazon Web Services resources assessed to generate the evidence.

" } }, - "documentation":"

The folder in which AWS Audit Manager stores evidence for an assessment.

" + "documentation":"

The folder in which Audit Manager stores evidence for an assessment.

" }, "AssessmentEvidenceFolderName":{ "type":"string", @@ -1132,7 +1132,7 @@ "documentation":"

The control sets associated with the framework.

" } }, - "documentation":"

The file used to structure and automate AWS Audit Manager assessments for a given compliance standard.

" + "documentation":"

The file used to structure and automate Audit Manager assessments for a given compliance standard.

" }, "AssessmentFrameworkDescription":{ "type":"string", @@ -1219,7 +1219,7 @@ }, "scope":{ "shape":"Scope", - "documentation":"

The wrapper of AWS accounts and services in scope for the assessment.

" + "documentation":"

The wrapper of accounts and services in scope for the assessment.

" }, "roles":{ "shape":"Roles", @@ -1276,7 +1276,7 @@ "documentation":"

The time of the most recent update.

" } }, - "documentation":"

A metadata object associated with an assessment in AWS Audit Manager.

" + "documentation":"

A metadata object associated with an assessment in Audit Manager.

" }, "AssessmentName":{ "type":"string", @@ -1301,7 +1301,7 @@ }, "awsAccountId":{ "shape":"AccountId", - "documentation":"

The identifier for the specified AWS account.

" + "documentation":"

The identifier for the specified account.

" }, "assessmentId":{ "shape":"UUID", @@ -1324,7 +1324,7 @@ "documentation":"

Specifies when the assessment report was created.

" } }, - "documentation":"

A finalized document generated from an AWS Audit Manager assessment. These reports summarize the relevant evidence collected for your audit, and link to the relevant evidence folders which are named and organized according to the controls specified in your assessment.

" + "documentation":"

A finalized document generated from an Audit Manager assessment. These reports summarize the relevant evidence collected for your audit, and link to the relevant evidence folders which are named and organized according to the controls specified in your assessment.

" }, "AssessmentReportDescription":{ "type":"string", @@ -1421,7 +1421,7 @@ "documentation":"

The destination of the assessment report.

" } }, - "documentation":"

The location in which AWS Audit Manager saves assessment reports for the given assessment.

" + "documentation":"

The location in which Audit Manager saves assessment reports for the given assessment.

" }, "AssessmentReportsMetadata":{ "type":"list", @@ -1506,7 +1506,7 @@ "members":{ "createDelegationRequest":{ "shape":"CreateDelegationRequest", - "documentation":"

The API request to batch create delegations in AWS Audit Manager.

" + "documentation":"

The API request to batch create delegations in Audit Manager.

" }, "errorCode":{ "shape":"ErrorCode", @@ -1532,7 +1532,7 @@ "members":{ "createDelegationRequests":{ "shape":"CreateDelegationRequests", - "documentation":"

The API request to batch create delegations in AWS Audit Manager.

" + "documentation":"

The API request to batch create delegations in Audit Manager.

" }, "assessmentId":{ "shape":"UUID", @@ -1647,7 +1647,7 @@ "members":{ "manualEvidence":{ "shape":"ManualEvidence", - "documentation":"

Manual evidence that cannot be collected automatically by AWS Audit Manager.

" + "documentation":"

Manual evidence that cannot be collected automatically by Audit Manager.

" }, "errorCode":{ "shape":"ErrorCode", @@ -1731,7 +1731,7 @@ "documentation":"

The IAM user or role that performed the action.

" } }, - "documentation":"

The record of a change within AWS Audit Manager, such as a modified assessment, a delegated control set, and so on.

" + "documentation":"

The record of a change within Audit Manager, such as a modified assessment, a delegated control set, and so on.

" }, "ChangeLogs":{ "type":"list", @@ -1779,7 +1779,7 @@ }, "controlSources":{ "shape":"ControlSources", - "documentation":"

The data source that determines from where AWS Audit Manager collects evidence for the control.

" + "documentation":"

The data source that determines from where Audit Manager collects evidence for the control.

" }, "controlMappingSources":{ "shape":"ControlMappingSources", @@ -1806,7 +1806,7 @@ "documentation":"

The tags associated with the control.

" } }, - "documentation":"

A control in AWS Audit Manager.

" + "documentation":"

A control in Audit Manager.

" }, "ControlComment":{ "type":"structure", @@ -1873,7 +1873,7 @@ "documentation":"

The instructions for troubleshooting the specified control.

" } }, - "documentation":"

The data source that determines from where AWS Audit Manager collects evidence for the control.

" + "documentation":"

The data source that determines from where Audit Manager collects evidence for the control.

" }, "ControlMappingSources":{ "type":"list", @@ -1897,7 +1897,7 @@ }, "controlSources":{ "shape":"ControlSources", - "documentation":"

The data source that determines from where AWS Audit Manager collects evidence for the control.

" + "documentation":"

The data source that determines from where Audit Manager collects evidence for the control.

" }, "createdAt":{ "shape":"Timestamp", @@ -1945,7 +1945,7 @@ "documentation":"

The list of controls within the control set.

" } }, - "documentation":"

A set of controls in AWS Audit Manager.

" + "documentation":"

A set of controls in Audit Manager.

" }, "ControlSetId":{ "type":"string", @@ -2008,7 +2008,7 @@ "documentation":"

The unique identifier of the control.

" } }, - "documentation":"

Control entity attributes that uniquely identify an existing control to be added to a framework in AWS Audit Manager.

" + "documentation":"

Control entity attributes that uniquely identify an existing control to be added to a framework in Audit Manager.

" }, "CreateAssessmentFrameworkControlSet":{ "type":"structure", @@ -2023,7 +2023,7 @@ "documentation":"

The list of controls within the control set. This does not contain the control set ID.

" } }, - "documentation":"

A controlSet entity that represents a collection of controls in AWS Audit Manager. This does not contain the control set ID.

" + "documentation":"

A controlSet entity that represents a collection of controls in Audit Manager. This does not contain the control set ID.

" }, "CreateAssessmentFrameworkControlSets":{ "type":"list", @@ -2157,7 +2157,7 @@ }, "sourceDescription":{ "shape":"SourceDescription", - "documentation":"

The description of the data source that determines from where AWS Audit Manager collects evidence for the control.

" + "documentation":"

The description of the data source that determines from where Audit Manager collects evidence for the control.

" }, "sourceSetUpOption":{ "shape":"SourceSetUpOption", @@ -2250,7 +2250,7 @@ "documentation":"

The type of customer persona.

In CreateAssessment, roleType can only be PROCESS_OWNER.

In UpdateSettings, roleType can only be PROCESS_OWNER.

In BatchCreateDelegationByAssessment, roleType can only be RESOURCE_OWNER.

" } }, - "documentation":"

A collection of attributes used to create a delegation for an assessment in AWS Audit Manager.

" + "documentation":"

A collection of attributes used to create a delegation for an assessment in Audit Manager.

" }, "CreateDelegationRequests":{ "type":"list", @@ -2535,7 +2535,7 @@ }, "evidenceAwsAccountId":{ "shape":"AccountId", - "documentation":"

The identifier for the specified AWS account.

" + "documentation":"

The identifier for the specified account.

" }, "time":{ "shape":"Timestamp", @@ -2543,7 +2543,7 @@ }, "eventSource":{ "shape":"AWSServiceName", - "documentation":"

The AWS service from which the evidence is collected.

" + "documentation":"

The Amazon Web Service from which the evidence is collected.

" }, "eventName":{ "shape":"EventName", @@ -2567,15 +2567,15 @@ }, "complianceCheck":{ "shape":"String", - "documentation":"

The evaluation status for evidence that falls under the compliance check category. For evidence collected from AWS Security Hub, a Pass or Fail result is shown. For evidence collected from AWS Config, a Compliant or Noncompliant result is shown.

" + "documentation":"

The evaluation status for evidence that falls under the compliance check category. For evidence collected from Security Hub, a Pass or Fail result is shown. For evidence collected from Config, a Compliant or Noncompliant result is shown.

" }, "awsOrganization":{ "shape":"String", - "documentation":"

The AWS account from which the evidence is collected, and its AWS organization path.

" + "documentation":"

The account from which the evidence is collected, and its organization path.

" }, "awsAccountId":{ "shape":"AccountId", - "documentation":"

The identifier for the specified AWS account.

" + "documentation":"

The identifier for the specified account.

" }, "evidenceFolderId":{ "shape":"UUID", @@ -2660,7 +2660,7 @@ }, "controlSources":{ "shape":"ControlSources", - "documentation":"

The sources from which AWS Audit Manager collects evidence for the control.

" + "documentation":"

The sources from which Audit Manager collects evidence for the control.

" }, "controlSets":{ "shape":"ControlSets", @@ -2687,7 +2687,7 @@ "documentation":"

The tags associated with the framework.

" } }, - "documentation":"

The file used to structure and automate AWS Audit Manager assessments for a given compliance standard.

" + "documentation":"

The file used to structure and automate Audit Manager assessments for a given compliance standard.

" }, "FrameworkDescription":{ "type":"string", @@ -2750,7 +2750,7 @@ "members":{ "status":{ "shape":"AccountStatus", - "documentation":"

The status of the specified AWS account.

" + "documentation":"

The status of the specified account.

" } } }, @@ -3173,7 +3173,7 @@ "members":{ "serviceMetadata":{ "shape":"ServiceMetadataList", - "documentation":"

The metadata associated with the aAWS service.

" + "documentation":"

The metadata associated with the Amazon Web Service.

" } } }, @@ -3194,7 +3194,7 @@ "members":{ "settings":{ "shape":"Settings", - "documentation":"

The settings object that holds all supported AWS Audit Manager settings.

" + "documentation":"

The settings object that holds all supported Audit Manager settings.

" } } }, @@ -3482,7 +3482,7 @@ "documentation":"

The Amazon S3 URL that points to a manual evidence object.

" } }, - "documentation":"

Evidence that is uploaded to AWS Audit Manager manually.

" + "documentation":"

Evidence that is uploaded to Audit Manager manually.

" }, "ManualEvidenceList":{ "type":"list", @@ -3538,7 +3538,7 @@ "documentation":"

The sender of the notification.

" } }, - "documentation":"

The notification used to inform a user of an update in AWS Audit Manager. For example, this includes the notification that is sent when a control set is delegated for review.

" + "documentation":"

The notification used to inform a user of an update in Audit Manager. For example, this includes the notification that is sent when a control set is delegated for review.

" }, "Notifications":{ "type":"list", @@ -3559,11 +3559,11 @@ "members":{ "kmsKey":{ "shape":"KmsKey", - "documentation":"

The AWS KMS key details.

" + "documentation":"

The KMS key details.

" }, "delegatedAdminAccount":{ "shape":"AccountId", - "documentation":"

The delegated administrator account for AWS Audit Manager.

" + "documentation":"

The delegated administrator account for Audit Manager.

" } } }, @@ -3595,7 +3595,7 @@ }, "organizationId":{ "shape":"organizationId", - "documentation":"

The identifier for the specified AWS organization.

" + "documentation":"

The identifier for the specified organization.

" } } }, @@ -3611,7 +3611,7 @@ "documentation":"

The value of the specified resource.

" } }, - "documentation":"

A system asset that is evaluated in an AWS Audit Manager assessment.

" + "documentation":"

A system asset that is evaluated in an Audit Manager assessment.

" }, "ResourceNotFoundException":{ "type":"structure", @@ -3651,7 +3651,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the IAM role.

" } }, - "documentation":"

The wrapper that contains the AWS Audit Manager role information of the current user, such as the role type and IAM Amazon Resource Name (ARN).

" + "documentation":"

The wrapper that contains the Audit Manager role information of the current user, such as the role type and IAM Amazon Resource Name (ARN).

" }, "RoleType":{ "type":"string", @@ -3668,7 +3668,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"^(S|s)3:\\/\\/[a-zA-Z0-9-_\\/.]+$" + "pattern":"^(S|s)3:\\/\\/[a-zA-Z0-9\\-\\.\\(\\)\\'\\*\\_\\!\\/]+$" }, "SNSTopic":{ "type":"string", @@ -3681,36 +3681,36 @@ "members":{ "awsAccounts":{ "shape":"AWSAccounts", - "documentation":"

The AWS accounts included in the scope of the assessment.

" + "documentation":"

The accounts included in the scope of the assessment.

" }, "awsServices":{ "shape":"AWSServices", - "documentation":"

The AWS services included in the scope of the assessment.

" + "documentation":"

The Amazon Web Services services included in the scope of the assessment.

" } }, - "documentation":"

The wrapper that contains the AWS accounts and AWS services in scope for the assessment.

" + "documentation":"

The wrapper that contains the accounts and services in scope for the assessment.

" }, "ServiceMetadata":{ "type":"structure", "members":{ "name":{ "shape":"AWSServiceName", - "documentation":"

The name of the AWS service.

" + "documentation":"

The name of the Amazon Web Service.

" }, "displayName":{ "shape":"NonEmptyString", - "documentation":"

The display name of the AWS service.

" + "documentation":"

The display name of the Amazon Web Service.

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the specified AWS service.

" + "documentation":"

The description of the specified Amazon Web Service.

" }, "category":{ "shape":"NonEmptyString", - "documentation":"

The category in which the AWS service belongs, such as compute, storage, database, and so on.

" + "documentation":"

The category in which the Amazon Web Service belongs, such as compute, storage, database, and so on.

" } }, - "documentation":"

The metadata associated with the specified AWS service.

" + "documentation":"

The metadata associated with the specified Amazon Web Service.

" }, "ServiceMetadataList":{ "type":"list", @@ -3731,7 +3731,7 @@ "members":{ "isAwsOrgEnabled":{ "shape":"Boolean", - "documentation":"

Specifies whether AWS Organizations is enabled.

" + "documentation":"

Specifies whether Organizations is enabled.

" }, "snsTopic":{ "shape":"SNSTopic", @@ -3747,10 +3747,10 @@ }, "kmsKey":{ "shape":"KmsKey", - "documentation":"

The AWS KMS key details.

" + "documentation":"

The KMS key details.

" } }, - "documentation":"

The settings object that holds all supported AWS Audit Manager settings.

" + "documentation":"

The settings object that holds all supported Audit Manager settings.

" }, "SnsArn":{ "type":"string", @@ -3780,10 +3780,10 @@ }, "keywordValue":{ "shape":"KeywordValue", - "documentation":"

The value of the keyword used to search AWS CloudTrail logs, AWS Config rules, AWS Security Hub checks, and AWS API names when mapping a control data source.

" + "documentation":"

The value of the keyword used to search CloudTrail logs, Config rules, Security Hub checks, and Amazon Web Services API names when mapping a control data source.

" } }, - "documentation":"

The keyword to search for in AWS CloudTrail logs, AWS Config rules, AWS Security Hub checks, and AWS API names.

" + "documentation":"

The keyword to search for in CloudTrail logs, Config rules, Security Hub checks, and Amazon Web Services API names.

" }, "SourceName":{ "type":"string", @@ -4033,7 +4033,7 @@ "documentation":"

The list of controls contained within the control set.

" } }, - "documentation":"

A controlSet entity that represents a collection of controls in AWS Audit Manager. This does not contain the control set ID.

" + "documentation":"

A controlSet entity that represents a collection of controls in Audit Manager. This does not contain the control set ID.

" }, "UpdateAssessmentFrameworkControlSets":{ "type":"list", @@ -4206,7 +4206,7 @@ "members":{ "snsTopic":{ "shape":"SnsArn", - "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic to which AWS Audit Manager sends notifications.

" + "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic to which Audit Manager sends notifications.

" }, "defaultAssessmentReportsDestination":{ "shape":"AssessmentReportsDestination", @@ -4218,7 +4218,7 @@ }, "kmsKey":{ "shape":"KmsKey", - "documentation":"

The AWS KMS key details.

" + "documentation":"

The KMS key details.

" } } }, @@ -4338,5 +4338,5 @@ "pattern":"o-[a-z0-9]{10,32}" } }, - "documentation":"

Welcome to the AWS Audit Manager API reference. This guide is for developers who need detailed information about the AWS Audit Manager API operations, data types, and errors.

AWS Audit Manager is a service that provides automated evidence collection so that you can continuously audit your AWS usage, and assess the effectiveness of your controls to better manage risk and simplify compliance.

AWS Audit Manager provides pre-built frameworks that structure and automate assessments for a given compliance standard. Frameworks include a pre-built collection of controls with descriptions and testing procedures, which are grouped according to the requirements of the specified compliance standard or regulation. You can also customize frameworks and controls to support internal audits with unique requirements.

Use the following links to get started with the AWS Audit Manager API:

  • Actions: An alphabetical list of all AWS Audit Manager API operations.

  • Data types: An alphabetical list of all AWS Audit Manager data types.

  • Common parameters: Parameters that all Query operations can use.

  • Common errors: Client and server errors that all operations can return.

If you're new to AWS Audit Manager, we recommend that you review the AWS Audit Manager User Guide.

" + "documentation":"

Welcome to the Audit Manager API reference. This guide is for developers who need detailed information about the Audit Manager API operations, data types, and errors.

Audit Manager is a service that provides automated evidence collection so that you can continuously audit your Amazon Web Services usage, and assess the effectiveness of your controls to better manage risk and simplify compliance.

Audit Manager provides pre-built frameworks that structure and automate assessments for a given compliance standard. Frameworks include a pre-built collection of controls with descriptions and testing procedures, which are grouped according to the requirements of the specified compliance standard or regulation. You can also customize frameworks and controls to support internal audits with unique requirements.

Use the following links to get started with the Audit Manager API:

  • Actions: An alphabetical list of all Audit Manager API operations.

  • Data types: An alphabetical list of all Audit Manager data types.

  • Common parameters: Parameters that all Query operations can use.

  • Common errors: Client and server errors that all operations can return.

If you're new to Audit Manager, we recommend that you review the Audit Manager User Guide.

" } diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index f77396b9bc13..8ac0c9e011d4 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index 13dae7d40915..f35bead38fa4 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -907,7 +907,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"InstanceRefreshInProgressFault"} ], - "documentation":"

Starts a new instance refresh operation, which triggers a rolling replacement of previously launched instances in the Auto Scaling group with a new group of instances.

This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.

If the call succeeds, it creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

" + "documentation":"

Starts a new instance refresh operation. An instance refresh performs a rolling replacement of all or some instances in an Auto Scaling group. Each instance is terminated first and then replaced, which temporarily reduces the capacity available within your Auto Scaling group.

This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This feature is helpful, for example, when you have a new AMI or a new user data script. You just need to create a new launch template that specifies the new AMI or user data script. Then start an instance refresh to immediately begin the process of updating instances in the group.

If the call succeeds, it creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

" }, "SuspendProcesses":{ "name":"SuspendProcesses", @@ -2340,6 +2340,14 @@ } } }, + "DesiredConfiguration":{ + "type":"structure", + "members":{ + "LaunchTemplate":{"shape":"LaunchTemplateSpecification"}, + "MixedInstancesPolicy":{"shape":"MixedInstancesPolicy"} + }, + "documentation":"

Describes the desired configuration for an instance refresh.

If you specify a desired configuration, you must specify either a LaunchTemplate or a MixedInstancesPolicy.

" + }, "DetachInstancesAnswer":{ "type":"structure", "members":{ @@ -2818,6 +2826,11 @@ "ProgressDetails":{ "shape":"InstanceRefreshProgressDetails", "documentation":"

Additional progress details for an Auto Scaling group that has a warm pool.

" + }, + "Preferences":{"shape":"RefreshPreferences"}, + "DesiredConfiguration":{ + "shape":"DesiredConfiguration", + "documentation":"

Describes the specific update you want to deploy.

" } }, "documentation":"

Describes an instance refresh for an Auto Scaling group.

" @@ -2928,7 +2941,7 @@ "documentation":"

The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value at its default (empty), Amazon EC2 Auto Scaling uses the On-Demand price as the maximum Spot price. To remove a value that you previously set, include the property but specify an empty string (\"\") for the value.

" } }, - "documentation":"

Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities.

When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + "documentation":"

Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities.

When you modify SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice in the UpdateAutoScalingGroup API call, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" }, "InstancesToUpdate":{ "type":"integer", @@ -3108,7 +3121,7 @@ "documentation":"

Any properties that you specify override the same properties in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.

" } }, - "documentation":"

Describes a launch template and overrides.

You specify these properties as part of a mixed instances policy.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + "documentation":"

Describes a launch template and overrides.

You specify these properties as part of a mixed instances policy.

When you update the launch template or overrides in the UpdateAutoScalingGroup API call, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" }, "LaunchTemplateName":{ "type":"string", @@ -3150,7 +3163,7 @@ "documentation":"

The version number, $Latest, or $Default. To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API. If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

" } }, - "documentation":"

Describes the Amazon EC2 launch template and the launch template version that can be used by an Auto Scaling group to configure Amazon EC2 instances.

The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Creating a launch template for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information about launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleActionResult":{"type":"string"}, "LifecycleActionToken":{ @@ -3464,14 +3477,14 @@ "members":{ "LaunchTemplate":{ "shape":"LaunchTemplate", - "documentation":"

Specifies the launch template to use and optionally the instance types (overrides) that are used to provision EC2 instances to fulfill On-Demand and Spot capacities. Required when creating a mixed instances policy.

" + "documentation":"

Specifies the launch template to use and the instance types (overrides) that are used to provision EC2 instances to fulfill On-Demand and Spot capacities. Required when creating a mixed instances policy.

" }, "InstancesDistribution":{ "shape":"InstancesDistribution", "documentation":"

Specifies the instances distribution. If not provided, the value for each property in InstancesDistribution uses a default value.

" } }, - "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level property instead of a launch configuration or launch template.

" + "documentation":"

Describes a mixed instances policy. A mixed instances policy contains the instance types Amazon EC2 Auto Scaling can launch, and other information Amazon EC2 Auto Scaling can use to launch instances to help you optimize your costs. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" }, "MonitoringEnabled":{"type":"boolean"}, "NoDevice":{"type":"boolean"}, @@ -4003,7 +4016,7 @@ "members":{ "MinHealthyPercentage":{ "shape":"IntPercent", - "documentation":"

The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group (rounded up to the nearest integer). The default is 90.

" + "documentation":"

The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue. The value is expressed as a percentage of the desired capacity of the Auto Scaling group (rounded up to the nearest integer). The default is 90.

Setting the minimum healthy percentage to 100 percent limits the rate of replacement to one instance at a time. In contrast, setting it to 0 percent has the effect of replacing all instances at the same time.

" }, "InstanceWarmup":{ "shape":"RefreshInstanceWarmup", @@ -4016,9 +4029,13 @@ "CheckpointDelay":{ "shape":"CheckpointDelay", "documentation":"

The amount of time, in seconds, to wait after a checkpoint before continuing. This property is optional, but if you specify a value for it, you must also specify a value for CheckpointPercentages. If you specify a value for CheckpointPercentages and not for CheckpointDelay, the CheckpointDelay defaults to 3600 (1 hour).

" + }, + "SkipMatching":{ + "shape":"SkipMatching", + "documentation":"

A boolean value that indicates whether skip matching is enabled. If true, then Amazon EC2 Auto Scaling skips replacing instances that match the desired configuration. If no desired configuration is specified, then it skips replacing instances that have the same configuration that is already set on the group. The default is false.

" } }, - "documentation":"

Describes information used to start an instance refresh.

All properties are optional. However, if you specify a value for CheckpointDelay, you must also provide a value for CheckpointPercentages.

" + "documentation":"

Describes the preferences for an instance refresh.

" }, "RefreshStrategy":{ "type":"string", @@ -4386,6 +4403,7 @@ }, "ShouldDecrementDesiredCapacity":{"type":"boolean"}, "ShouldRespectGracePeriod":{"type":"boolean"}, + "SkipMatching":{"type":"boolean"}, "SpotInstancePools":{"type":"integer"}, "SpotPrice":{ "type":"string", @@ -4411,11 +4429,15 @@ }, "Strategy":{ "shape":"RefreshStrategy", - "documentation":"

The strategy to use for the instance refresh. The only valid value is Rolling.

A rolling update is an update that is applied to all instances in an Auto Scaling group until all instances have been updated. A rolling update can fail due to failed health checks or if instances are on standby or are protected from scale in. If the rolling update process fails, any instances that were already replaced are not rolled back to their previous configuration.

" + "documentation":"

The strategy to use for the instance refresh. The only valid value is Rolling.

A rolling update helps you update your instances gradually. A rolling update can fail due to failed health checks or if instances are on standby or are protected from scale in. If the rolling update process fails, any instances that are replaced are not rolled back to their previous configuration.

" + }, + "DesiredConfiguration":{ + "shape":"DesiredConfiguration", + "documentation":"

The desired configuration. For example, the desired configuration can specify a new launch template or a new version of the current launch template.

Once the instance refresh succeeds, Amazon EC2 Auto Scaling updates the settings of the Auto Scaling group to reflect the new desired configuration.

When you specify a new launch template or a new version of the current launch template for your desired configuration, consider enabling the SkipMatching property in preferences. If it's enabled, Amazon EC2 Auto Scaling skips replacing instances that already use the specified launch template and version. This can help you reduce the number of replacements that are required to apply updates.

" }, "Preferences":{ "shape":"RefreshPreferences", - "documentation":"

Set of preferences associated with the instance refresh request.

If not provided, the default values are used. For MinHealthyPercentage, the default value is 90. For InstanceWarmup, the default is to use the value specified for the health check grace period for the Auto Scaling group.

For more information, see RefreshPreferences in the Amazon EC2 Auto Scaling API Reference.

" + "documentation":"

Set of preferences associated with the instance refresh request. If not provided, the default values are used.

" } } }, diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 2ce5ac6d1838..a9355f09be8f 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 3a774432521a..74550a188829 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/batch/pom.xml b/services/batch/pom.xml index 8b64a7b5c3fa..0d6e88dca0bc 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/batch/src/main/resources/codegen-resources/service-2.json b/services/batch/src/main/resources/codegen-resources/service-2.json index e0bfc06d058c..5a7d2fba98ea 100644 --- a/services/batch/src/main/resources/codegen-resources/service-2.json +++ b/services/batch/src/main/resources/codegen-resources/service-2.json @@ -24,7 +24,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING or RUNNING aren't canceled, but the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.

" + "documentation":"

Cancels a job in a Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING or RUNNING aren't canceled, but the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.

" }, "CreateComputeEnvironment":{ "name":"CreateComputeEnvironment", @@ -38,7 +38,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Creates an AWS Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or AWS Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs aren't supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

AWS Batch doesn't upgrade the AMIs in a compute environment after the environment is created. For example, it doesn't update the AMIs when a newer version of the Amazon ECS optimized AMI is available. Therefore, you're responsible for managing the guest operating system (including its updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your AWS Batch jobs, complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

" + "documentation":"

Creates a Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs aren't supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meets the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you create your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

Batch doesn't upgrade the AMIs in a compute environment after the environment is created. For example, it doesn't update the AMIs when a newer version of the Amazon ECS optimized AMI is available. Therefore, you're responsible for managing the guest operating system (including its updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your Batch jobs, complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

" }, "CreateJobQueue":{ "name":"CreateJobQueue", @@ -52,7 +52,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.

You also set a priority to the job queue that determines the order that the AWS Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.

" + "documentation":"

Creates a Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.

You also set a priority to the job queue that determines the order that the Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.

" }, "DeleteComputeEnvironment":{ "name":"DeleteComputeEnvironment", @@ -66,7 +66,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Deletes an AWS Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use AWS Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment enters an invalid state.

" + "documentation":"

Deletes a Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment enters an invalid state.

" }, "DeleteJobQueue":{ "name":"DeleteJobQueue", @@ -94,7 +94,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Deregisters an AWS Batch job definition. Job definitions are permanently deleted after 180 days.

" + "documentation":"

Deregisters a Batch job definition. Job definitions are permanently deleted after 180 days.

" }, "DescribeComputeEnvironments":{ "name":"DescribeComputeEnvironments", @@ -150,7 +150,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Describes a list of AWS Batch jobs.

" + "documentation":"

Describes a list of Batch jobs.

" }, "ListJobs":{ "name":"ListJobs", @@ -164,7 +164,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Returns a list of AWS Batch jobs.

You must specify only one of the following items:

  • A job queue ID to return a list of jobs in that job queue

  • A multi-node parallel job ID to return a list of nodes for that job

  • An array job ID to return a list of the children for that job

You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned.

" + "documentation":"

Returns a list of Batch jobs.

You must specify only one of the following items:

  • A job queue ID to return a list of jobs in that job queue

  • A multi-node parallel job ID to return a list of nodes for that job

  • An array job ID to return a list of the children for that job

You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -178,7 +178,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Lists the tags for an AWS Batch resource. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

" + "documentation":"

Lists the tags for a Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

" }, "RegisterJobDefinition":{ "name":"RegisterJobDefinition", @@ -192,7 +192,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Registers an AWS Batch job definition.

" + "documentation":"

Registers a Batch job definition.

" }, "SubmitJob":{ "name":"SubmitJob", @@ -206,7 +206,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Submits an AWS Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory requirements that are specified in the ResourceRequirements objects in the job definition are the exception. They can't be overridden this way using the memory and vcpus parameters. Rather, you must specify updates to job definition parameters in a ResourceRequirements object that's included in the containerOverrides parameter.

Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 days, Fargate resources might become unavailable and job might be terminated.

" + "documentation":"

Submits a Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory requirements that are specified in the ResourceRequirements objects in the job definition are the exception. They can't be overridden this way using the memory and vcpus parameters. Rather, you must specify updates to job definition parameters in a ResourceRequirements object that's included in the containerOverrides parameter.

Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 days, Fargate resources might become unavailable and job might be terminated.

" }, "TagResource":{ "name":"TagResource", @@ -220,7 +220,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags associated with that resource are deleted as well. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

" + "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags that are associated with that resource are deleted as well. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

" }, "TerminateJob":{ "name":"TerminateJob", @@ -248,7 +248,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Deletes specified tags from an AWS Batch resource.

" + "documentation":"

Deletes specified tags from a Batch resource.

" }, "UpdateComputeEnvironment":{ "name":"UpdateComputeEnvironment", @@ -262,7 +262,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Updates an AWS Batch compute environment.

" + "documentation":"

Updates a Batch compute environment.

" }, "UpdateJobQueue":{ "name":"UpdateJobQueue", @@ -300,7 +300,7 @@ "documentation":"

The size of the array job.

" } }, - "documentation":"

An object representing an AWS Batch array job.

" + "documentation":"

An object representing a Batch array job.

" }, "ArrayPropertiesDetail":{ "type":"structure", @@ -362,7 +362,7 @@ }, "logStreamName":{ "shape":"String", - "documentation":"

The name of the CloudWatch Logs log stream associated with the container. The log group for AWS Batch jobs is /aws/batch/job. Each container attempt receives a log stream name when they reach the RUNNING status.

" + "documentation":"

The name of the CloudWatch Logs log stream associated with the container. The log group for Batch jobs is /aws/batch/job. Each container attempt receives a log stream name when they reach the RUNNING status.

" }, "networkInterfaces":{ "shape":"NetworkInterfaceList", @@ -449,11 +449,11 @@ "members":{ "jobId":{ "shape":"String", - "documentation":"

The AWS Batch job ID of the job to cancel.

" + "documentation":"

The Batch job ID of the job to cancel.

" }, "reason":{ "shape":"String", - "documentation":"

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the AWS Batch activity logs.

" + "documentation":"

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs.

" } }, "documentation":"

Contains the parameters for CancelJob.

" @@ -498,11 +498,11 @@ }, "type":{ "shape":"CEType", - "documentation":"

The type of the compute environment: MANAGED or UNMANAGED. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

The type of the compute environment: MANAGED or UNMANAGED. For more information, see Compute Environments in the Batch User Guide.

" }, "state":{ "shape":"CEState", - "documentation":"

The state of the compute environment. The valid values are ENABLED or DISABLED.

If the state is ENABLED, then the AWS Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand.

If the state is DISABLED, then the AWS Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state don't scale out. However, they scale in to minvCpus value after instances become idle.

" + "documentation":"

The state of the compute environment. The valid values are ENABLED or DISABLED.

If the state is ENABLED, then the Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand.

If the state is DISABLED, then the Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state don't scale out. However, they scale in to the minvCpus value after instances become idle.

" }, "status":{ "shape":"CEStatus", @@ -514,14 +514,14 @@ }, "computeResources":{ "shape":"ComputeResource", - "documentation":"

The compute resources defined for the compute environment. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

The compute resources defined for the compute environment. For more information, see Compute Environments in the Batch User Guide.

" }, "serviceRole":{ "shape":"String", - "documentation":"

The service role associated with the compute environment that allows AWS Batch to make calls to AWS API operations on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

" + "documentation":"

The service role associated with the compute environment that allows Batch to make calls to Amazon Web Services API operations on your behalf. For more information, see Batch service IAM role in the Batch User Guide.

" } }, - "documentation":"

An object representing an AWS Batch compute environment.

" + "documentation":"

An object representing a Batch compute environment.

" }, "ComputeEnvironmentDetailList":{ "type":"list", @@ -543,7 +543,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the compute environment.

" } }, - "documentation":"

The order in which compute environments are tried for job placement within a queue. Compute environments are tried in ascending order. For example, if two compute environments are associated with a job queue, the compute environment with a lower order integer value is tried for job placement first. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.

" + "documentation":"

The order in which compute environments are tried for job placement within a queue. Compute environments are tried in ascending order. For example, if two compute environments are associated with a job queue, the compute environment with a lower order integer value is tried for job placement first. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. Batch doesn't support mixing compute environment architecture types in a single job queue.

" }, "ComputeEnvironmentOrders":{ "type":"list", @@ -559,102 +559,102 @@ "members":{ "type":{ "shape":"CRType", - "documentation":"

The type of compute environment: EC2, SPOT, FARGATE, or FARGATE_SPOT. For more information, see Compute Environments in the AWS Batch User Guide.

If you choose SPOT, you must also specify an Amazon EC2 Spot Fleet role with the spotIamFleetRole parameter. For more information, see Amazon EC2 Spot Fleet role in the AWS Batch User Guide.

" + "documentation":"

The type of compute environment: EC2, SPOT, FARGATE, or FARGATE_SPOT. For more information, see Compute Environments in the Batch User Guide.

If you choose SPOT, you must also specify an Amazon EC2 Spot Fleet role with the spotIamFleetRole parameter. For more information, see Amazon EC2 Spot Fleet role in the Batch User Guide.

" }, "allocationStrategy":{ "shape":"CRAllocationStrategy", - "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation Strategies in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

BEST_FIT (default)

AWS Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, AWS Batch waits for the additional instances to be available. If there aren't enough instances available, or if the user is hitting Amazon EC2 service limits then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT then the Spot Fleet IAM Role must be specified.

BEST_FIT_PROGRESSIVE

AWS Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, AWS Batch will select new instance types.

SPOT_CAPACITY_OPTIMIZED

AWS Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance.

" + "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation Strategies in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

BEST_FIT (default)

Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available, or if the user is reaching Amazon EC2 service limits then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT then the Spot Fleet IAM Role must be specified.

BEST_FIT_PROGRESSIVE

Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, Batch will select new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, Batch might need to go above maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" }, "minvCpus":{ "shape":"Integer", - "documentation":"

The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is DISABLED).

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is DISABLED).

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

" + "documentation":"

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

" }, "desiredvCpus":{ "shape":"Integer", - "documentation":"

The desired number of Amazon EC2 vCPUS in the compute environment. AWS Batch modifies this value between the minimum and maximum values, based on job queue demand.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The desired number of Amazon EC2 vCPUS in the compute environment. Batch modifies this value between the minimum and maximum values, based on job queue demand.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "instanceTypes":{ "shape":"StringList", - "documentation":"

The instances types that can be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C4, M4, and R4 instance families) that match the demand of your job queues.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

Currently, optimal uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5. and R5 instance families are used.

" + "documentation":"

The instances types that can be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C4, M4, and R4 instance families) that match the demand of your job queues.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

Currently, optimal uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5, and R5 instance families are used.

" }, "imageId":{ "shape":"String", - "documentation":"

The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is overridden by the imageIdOverride member of the Ec2Configuration structure.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

The AMI that you choose for a compute environment must match the architecture of the instance types that you intend to use for that compute environment. For example, if your compute environment uses A1 instance types, the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon ECS-optimized Amazon Linux 2 AMI in the Amazon Elastic Container Service Developer Guide.

", + "documentation":"

The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is overridden by the imageIdOverride member of the Ec2Configuration structure.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

The AMI that you choose for a compute environment must match the architecture of the instance types that you intend to use for that compute environment. For example, if your compute environment uses A1 instance types, the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon ECS-optimized Amazon Linux 2 AMI in the Amazon Elastic Container Service Developer Guide.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use ec2Configuration[].imageIdOverride instead." }, "subnets":{ "shape":"StringList", - "documentation":"

The VPC subnets into which the compute resources are launched. These subnets must be within the same VPC. Fargate compute resources can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" + "documentation":"

The VPC subnets where the compute resources are launched. These subnets must be within the same VPC. Fargate compute resources can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" }, "securityGroupIds":{ "shape":"StringList", - "documentation":"

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs running on Fargate resources and must contain at least one security group. Fargate doesn't support launch templates. If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds is used.

" + "documentation":"

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs that are running on Fargate resources and must contain at least one security group. Fargate doesn't support launch templates. If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds are used.

" }, "ec2KeyPair":{ "shape":"String", - "documentation":"

The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "instanceRole":{ "shape":"String", - "documentation":"

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS Instance Role in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS Instance Role in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "tags":{ "shape":"TagsMap", - "documentation":"

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value−for example, { \"Name\": \"AWS Batch Instance - C4OnDemand\" }. This is helpful for recognizing your AWS Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment has been created; any changes require creating a new compute environment and removing the old compute environment. These tags aren't seen when using the AWS Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value−for example, { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment is created. Any changes to these tags require that you create a new compute environment and remove the old compute environment. These tags aren't seen when using the Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "placementGroup":{ "shape":"String", - "documentation":"

The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "bidPercentage":{ "shape":"Integer", - "documentation":"

The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand price.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand price.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "spotIamFleetRole":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. This role is required if the allocation strategy set to BEST_FIT or if the allocation strategy isn't specified. For more information, see Amazon EC2 Spot Fleet Role in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the newer AmazonEC2SpotFleetTaggingRole managed policy. The previously recommended AmazonEC2SpotFleetRole managed policy doesn't have the required permissions to tag Spot Instances. For more information, see Spot Instances not tagged on creation in the AWS Batch User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. This role is required if the allocation strategy set to BEST_FIT or if the allocation strategy isn't specified. For more information, see Amazon EC2 Spot Fleet Role in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the newer AmazonEC2SpotFleetTaggingRole managed policy. The previously recommended AmazonEC2SpotFleetRole managed policy doesn't have the required permissions to tag Spot Instances. For more information, see Spot Instances not tagged on creation in the Batch User Guide.

" }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch Template Support in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch Template Support in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "ec2Configuration":{ "shape":"Ec2ConfigurationList", - "documentation":"

Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL1.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL1.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" } }, - "documentation":"

An object representing an AWS Batch compute resource. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

An object representing a Batch compute resource. For more information, see Compute Environments in the Batch User Guide.

" }, "ComputeResourceUpdate":{ "type":"structure", "members":{ "minvCpus":{ "shape":"Integer", - "documentation":"

The minimum number of Amazon EC2 vCPUs that an environment should maintain.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The minimum number of Amazon EC2 vCPUs that an environment should maintain.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.

" + "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.

" }, "desiredvCpus":{ "shape":"Integer", - "documentation":"

The desired number of Amazon EC2 vCPUS in the compute environment.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

The desired number of Amazon EC2 vCPUS in the compute environment.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" }, "subnets":{ "shape":"StringList", - "documentation":"

The VPC subnets that the compute resources are launched into. Fargate compute resources can contain up to 16 subnets. Providing an empty list will be handled as if this parameter wasn't specified and no change is made. This can't be specified for EC2 compute resources. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" + "documentation":"

The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. Providing an empty list will be handled as if this parameter wasn't specified and no change is made. This can't be specified for EC2 compute resources. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" }, "securityGroupIds":{ "shape":"StringList", "documentation":"

The Amazon EC2 security groups associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. This can't be specified for EC2 compute resources. Providing an empty list is handled as if this parameter wasn't specified and no change is made.

" } }, - "documentation":"

An object representing the attributes of a compute environment that can be updated. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

An object representing the attributes of a compute environment that can be updated. For more information, see Compute Environments in the Batch User Guide.

" }, "ContainerDetail":{ "type":"structure", @@ -681,7 +681,7 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For more information, see AWS Batch execution IAM role in the AWS Batch User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the execution role that Batch can assume. For more information, see Batch execution IAM role in the Batch User Guide.

" }, "volumes":{ "shape":"Volumes", @@ -689,7 +689,7 @@ }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to pass to a container.

Environment variables must not start with AWS_BATCH; this naming convention is reserved for variables that are set by the AWS Batch service.

" + "documentation":"

The environment variables to pass to a container.

Environment variables must not start with AWS_BATCH; this naming convention is reserved for variables that are set by the Batch service.

" }, "mountPoints":{ "shape":"MountPoints", @@ -701,11 +701,11 @@ }, "ulimits":{ "shape":"Ulimits", - "documentation":"

A list of ulimit values to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

This parameter isn't applicable to jobs running on Fargate resources.

" + "documentation":"

A list of ulimit values to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

This parameter isn't applicable to jobs that are running on Fargate resources.

" }, "privileged":{ "shape":"Boolean", - "documentation":"

When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user). The default value is false.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided, or specified as false.

" + "documentation":"

When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user). The default value is false.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided, or specified as false.

" }, "user":{ "shape":"String", @@ -729,11 +729,11 @@ }, "logStreamName":{ "shape":"String", - "documentation":"

The name of the CloudWatch Logs log stream associated with the container. The log group for AWS Batch jobs is /aws/batch/job. Each container attempt receives a log stream name when they reach the RUNNING status.

" + "documentation":"

The name of the CloudWatch Logs log stream associated with the container. The log group for Batch jobs is /aws/batch/job. Each container attempt receives a log stream name when they reach the RUNNING status.

" }, "instanceType":{ "shape":"String", - "documentation":"

The instance type of the underlying host infrastructure of a multi-node parallel job.

This parameter isn't applicable to jobs running on Fargate resources.

" + "documentation":"

The instance type of the underlying host infrastructure of a multi-node parallel job.

This parameter isn't applicable to jobs that are running on Fargate resources.

" }, "networkInterfaces":{ "shape":"NetworkInterfaceList", @@ -749,19 +749,19 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Or, alternatively, it must be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers might be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Or, alternatively, it must be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers might be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "secrets":{ "shape":"SecretList", - "documentation":"

The secrets to pass to the container. For more information, see Specifying sensitive data in the AWS Batch User Guide.

" + "documentation":"

The secrets to pass to the container. For more information, see Specifying sensitive data in the Batch User Guide.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

The network configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" }, "fargatePlatformConfiguration":{ "shape":"FargatePlatformConfiguration", - "documentation":"

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" } }, "documentation":"

An object representing the details of a container that's part of a job.

" @@ -771,13 +771,13 @@ "members":{ "vcpus":{ "shape":"Integer", - "documentation":"

This parameter indicates the number of vCPUs reserved for the container.It overrides the vcpus parameter that's set in the job definition, but doesn't override any vCPU requirement specified in the resourceRequirement structure in the job definition.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For Fargate resources, you can only use resourceRequirement. For EC2 resources, you can use either this parameter or resourceRequirement but not both.

This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.

This parameter isn't applicable to jobs that run on Fargate resources and shouldn't be provided. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", + "documentation":"

This parameter indicates the number of vCPUs reserved for the container. It overrides the vcpus parameter that's set in the job definition, but doesn't override any vCPU requirement specified in the resourceRequirement structure in the job definition. To override vCPU requirements that are specified in the ResourceRequirement structure in the job definition, ResourceRequirement must be specified in the SubmitJob request, with type set to VCPU and value set to the new value.

This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For Fargate resources, you can only use resourceRequirement. For EC2 resources, you can use either this parameter or resourceRequirement but not both.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "memory":{ "shape":"Integer", - "documentation":"

This parameter indicates the amount of memory (in MiB) that's reserved for the job. It overrides the memory parameter set in the job definition, but doesn't override any memory requirement specified in the ResourceRequirement structure in the job definition.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead.

", + "documentation":"

This parameter indicates the amount of memory (in MiB) that's reserved for the job. It overrides the memory parameter set in the job definition, but doesn't override any memory requirement specified in the ResourceRequirement structure in the job definition. To override memory requirements that are specified in the ResourceRequirement structure in the job definition, ResourceRequirement must be specified in the SubmitJob request, with type set to MEMORY and value set to the new value.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, @@ -787,11 +787,11 @@ }, "instanceType":{ "shape":"String", - "documentation":"

The instance type to use for a multi-node parallel job.

This parameter isn't applicable to single-node container jobs or for jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

The instance type to use for a multi-node parallel job.

This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.

" }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the job definition.

Environment variables must not start with AWS_BATCH; this naming convention is reserved for variables that are set by the AWS Batch service.

" + "documentation":"

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the job definition.

Environment variables must not start with AWS_BATCH; this naming convention is reserved for variables that are set by the Batch service.

" }, "resourceRequirements":{ "shape":"ResourceRequirements", @@ -809,13 +809,13 @@ }, "vcpus":{ "shape":"Integer", - "documentation":"

The number of vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. The number of vCPUs must be specified but can be be specified in several places. You must specify it at least once for each node.

This parameter is supported on EC2 resources but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead. You can use this parameter or resourceRequirements structure but not both.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", + "documentation":"

The number of vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.

This parameter is supported on EC2 resources but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead. You can use this parameter or resourceRequirements structure but not both.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "memory":{ "shape":"Integer", - "documentation":"

This parameter indicates the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it is terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.

This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

This parameter is supported on EC2 resources but isn't supported on Fargate resources. For Fargate resources, you should specify the memory requirement using resourceRequirement. You can do this for EC2 resources.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

", + "documentation":"

This parameter indicates the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.

This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

This parameter is supported on EC2 resources but isn't supported on Fargate resources. For Fargate resources, you should specify the memory requirement using resourceRequirement. You can also do this for EC2 resources.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the Batch User Guide.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, @@ -825,11 +825,11 @@ }, "jobRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that the container can assume for Amazon Web Services permissions. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see AWS Batch execution IAM role in the AWS Batch User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the execution role that Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see Batch execution IAM role in the Batch User Guide.

" }, "volumes":{ "shape":"Volumes", @@ -837,7 +837,7 @@ }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We don't recommend using plaintext environment variables for sensitive information, such as credential data.

Environment variables must not start with AWS_BATCH; this naming convention is reserved for variables that are set by the AWS Batch service.

" + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We don't recommend using plaintext environment variables for sensitive information, such as credential data.

Environment variables must not start with AWS_BATCH; this naming convention is reserved for variables that are set by the Batch service.

" }, "mountPoints":{ "shape":"MountPoints", @@ -849,11 +849,11 @@ }, "privileged":{ "shape":"Boolean", - "documentation":"

When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run. The default value is false.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided, or specified as false.

" + "documentation":"

When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run. The default value is false.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided, or specified as false.

" }, "ulimits":{ "shape":"Ulimits", - "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

" }, "user":{ "shape":"String", @@ -861,7 +861,7 @@ }, "instanceType":{ "shape":"String", - "documentation":"

The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.

This parameter isn't applicable to single-node container jobs or for jobs that run on Fargate resources and shouldn't be provided.

" + "documentation":"

The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.

This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.

" }, "resourceRequirements":{ "shape":"ResourceRequirements", @@ -873,19 +873,19 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "secrets":{ "shape":"SecretList", - "documentation":"

The secrets for the container. For more information, see Specifying sensitive data in the AWS Batch User Guide.

" + "documentation":"

The secrets for the container. For more information, see Specifying sensitive data in the Batch User Guide.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

The network configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" }, "fargatePlatformConfiguration":{ "shape":"FargatePlatformConfiguration", - "documentation":"

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" } }, "documentation":"

Container properties are used in job definitions to describe the container that's launched as part of a job.

" @@ -917,23 +917,23 @@ }, "type":{ "shape":"CEType", - "documentation":"

The type of the compute environment: MANAGED or UNMANAGED. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

The type of the compute environment: MANAGED or UNMANAGED. For more information, see Compute Environments in the Batch User Guide.

" }, "state":{ "shape":"CEState", - "documentation":"

The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs from a queue and can scale out automatically based on queues.

If the state is ENABLED, then the AWS Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand.

If the state is DISABLED, then the AWS Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state don't scale out. However, they scale in to minvCpus value after instances become idle.

" + "documentation":"

The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs from a queue and can scale out automatically based on queues.

If the state is ENABLED, then the Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand.

If the state is DISABLED, then the Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state don't scale out. However, they scale in to minvCpus value after instances become idle.

" }, "computeResources":{ "shape":"ComputeResource", - "documentation":"

Details about the compute resources managed by the compute environment. This parameter is required for managed compute environments. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

Details about the compute resources managed by the compute environment. This parameter is required for managed compute environments. For more information, see Compute Environments in the Batch User Guide.

" }, "serviceRole":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your account has already created the AWS Batch service-linked role, that role is used by default for your compute environment unless you specify a role here. If the AWS Batch service-linked role does not exist in your account, and no role is specified here, the service will try to create the AWS Batch service-linked role in your account.

If your specified role has a path other than /, then you must specify either the full role ARN (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly names and paths in the IAM User Guide.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For more information, see Batch service IAM role in the Batch User Guide.

If your account already created the Batch service-linked role, that role is used by default for your compute environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your account, and no role is specified here, the service attempts to create the Batch service-linked role in your account.

If your specified role has a path other than /, then you must specify either the full role ARN (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly names and paths in the IAM User Guide.

Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" }, "tags":{ "shape":"TagrisTagsMap", - "documentation":"

The tags that you apply to the compute environment to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General Reference.

These tags can be updated or removed using the TagResource and UntagResource API operations. These tags don't propagate to the underlying compute resources.

" + "documentation":"

The tags that you apply to the compute environment to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference.

These tags can be updated or removed using the TagResource and UntagResource API operations. These tags don't propagate to the underlying compute resources.

" } }, "documentation":"

Contains the parameters for CreateComputeEnvironment.

" @@ -969,15 +969,15 @@ }, "priority":{ "shape":"Integer", - "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments cannot be mixed.

" + "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

" }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", - "documentation":"

The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment should run a specific job. Compute environments must be in the VALID state before you can associate them with a job queue. You can associate up to three compute environments with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.

" + "documentation":"

The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment should run a specific job. Compute environments must be in the VALID state before you can associate them with a job queue. You can associate up to three compute environments with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. Batch doesn't support mixing compute environment architecture types in a single job queue.

" }, "tags":{ "shape":"TagrisTagsMap", - "documentation":"

The tags that you apply to the job queue to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging your AWS Batch resources in AWS Batch User Guide.

" + "documentation":"

The tags that you apply to the job queue to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging your Batch resources in the Batch User Guide.

" } }, "documentation":"

Contains the parameters for CreateJobQueue.

" @@ -1082,7 +1082,7 @@ "members":{ "jobDefinitions":{ "shape":"StringList", - "documentation":"

A list of up to 100 job definition names or full Amazon Resource Name (ARN) entries.

" + "documentation":"

A list of up to 100 job definitions. Each entry in the list can either be an ARN of the form arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision} or a short version using the form ${JobDefinitionName}:${Revision}.

" }, "maxResults":{ "shape":"Integer", @@ -1177,14 +1177,14 @@ }, "containerPath":{ "shape":"String", - "documentation":"

The path inside the container used to expose the host device. By default, the hostPath value is used.

" + "documentation":"

The path inside the container that's used to expose the host device. By default, the hostPath value is used.

" }, "permissions":{ "shape":"DeviceCgroupPermissions", "documentation":"

The explicit permissions to provide to the container for the device. By default, the container has permissions for read, write, and mknod for the device.

" } }, - "documentation":"

An object representing a container instance host device.

This object isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

An object representing a container instance host device.

This object isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

" }, "DeviceCgroupPermission":{ "type":"string", @@ -1211,7 +1211,7 @@ }, "iam":{ "shape":"EFSAuthorizationConfigIAM", - "documentation":"

Whether or not to use the AWS Batch execution IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. If this parameter is omitted, the default value of DISABLED is used. For more information, see Using Amazon EFS Access Points in the AWS Batch User Guide. EFS IAM authorization requires that TransitEncryption be ENABLED and that a JobRoleArn is specified.

" + "documentation":"

Whether or not to use the Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. If this parameter is omitted, the default value of DISABLED is used. For more information, see Using Amazon EFS Access Points in the Batch User Guide. EFS IAM authorization requires that TransitEncryption be ENABLED and that a JobRoleArn is specified.

" } }, "documentation":"

The authorization configuration details for the Amazon EFS file system.

" @@ -1240,22 +1240,22 @@ }, "rootDirectory":{ "shape":"String", - "documentation":"

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter.

If an EFS access point is specified in the authorizationConfig, the root directory parameter must either be omitted or set to / which will enforce the path set on the Amazon EFS access point.

" + "documentation":"

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying / has the same effect as omitting this parameter. The maximum length is 4,096 characters.

If an EFS access point is specified in the authorizationConfig, the root directory parameter must either be omitted or set to /, which enforces the path set on the Amazon EFS access point.

" }, "transitEncryption":{ "shape":"EFSTransitEncryption", - "documentation":"

Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of DISABLED is used. For more information, see Encrypting data in transit in the Amazon Elastic File System User Guide.

" + "documentation":"

Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of DISABLED is used. For more information, see Encrypting data in transit in the Amazon Elastic File System User Guide.

" }, "transitEncryptionPort":{ "shape":"Integer", - "documentation":"

The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. For more information, see EFS Mount Helper in the Amazon Elastic File System User Guide.

" + "documentation":"

The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see EFS Mount Helper in the Amazon Elastic File System User Guide.

" }, "authorizationConfig":{ "shape":"EFSAuthorizationConfig", "documentation":"

The authorization configuration details for the Amazon EFS file system.

" } }, - "documentation":"

This parameter is specified when you are using an Amazon Elastic File System file system for task storage. For more information, see Amazon EFS Volumes in the AWS Batch User Guide.

" + "documentation":"

This is used when you're using an Amazon Elastic File System file system for job storage. For more information, see Amazon EFS Volumes in the Batch User Guide.

" }, "Ec2Configuration":{ "type":"structure", @@ -1263,14 +1263,14 @@ "members":{ "imageType":{ "shape":"ImageType", - "documentation":"

The image type to match with the instance type to select an AMI. If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized AMI (ECS_AL1) is used. Starting on March 31, 2021, this default will be changing to ECS_AL2 (Amazon Linux 2).

ECS_AL2

Amazon Linux 2− Default for all AWS Graviton-based instance families (for example, C6g, M6g, R6g, and T4g) and can be used for all non-GPU instance types.

ECS_AL2_NVIDIA

Amazon Linux 2 (GPU)−Default for all GPU instance families (for example P4 and G4) and can be used for all non-AWS Graviton-based instance types.

ECS_AL1

Amazon Linux−Default for all non-GPU, non-AWS Graviton instance families. Amazon Linux is reaching the end-of-life of standard support. For more information, see Amazon Linux AMI.

" + "documentation":"

The image type to match with the instance type to select an AMI. If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized AMI (ECS_AL1) is used. Starting on March 31, 2021, this default will be changing to ECS_AL2 (Amazon Linux 2).

ECS_AL2

Amazon Linux 2− Default for all Amazon Web Services Graviton-based instance families (for example, C6g, M6g, R6g, and T4g) and can be used for all non-GPU instance types.

ECS_AL2_NVIDIA

Amazon Linux 2 (GPU)−Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

ECS_AL1

Amazon Linux−Default for all non-GPU, non Amazon Web Services Graviton instance families. Amazon Linux is reaching the end-of-life of standard support. For more information, see Amazon Linux AMI.

" }, "imageIdOverride":{ "shape":"ImageIdOverride", "documentation":"

The AMI ID used for instances launched in the compute environment that match the image type. This setting overrides the imageId set in the computeResource object.

" } }, - "documentation":"

Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If Ec2Configuration isn't specified, the default is currently ECS_AL1 (Amazon Linux) for non-GPU, non-Graviton instances. Starting on March 31, 2021, this default will be changing to ECS_AL2 (Amazon Linux 2).

This object isn't applicable to jobs running on Fargate resources.

" + "documentation":"

Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If Ec2Configuration isn't specified, the default is currently ECS_AL1 (Amazon Linux) for non-GPU, non Amazon Web Services Graviton instances. Starting on March 31, 2021, this default will be changing to ECS_AL2 (Amazon Linux 2).

This object isn't applicable to jobs that are running on Fargate resources.

" }, "Ec2ConfigurationList":{ "type":"list", @@ -1286,15 +1286,15 @@ "members":{ "onStatusReason":{ "shape":"String", - "documentation":"

Contains a glob pattern to match against the StatusReason returned for a job. The pattern can be up to 512 characters long, and can contain letters, numbers, periods (.), colons (:), and white space (including spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + "documentation":"

Contains a glob pattern to match against the StatusReason returned for a job. The pattern can be up to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" }, "onReason":{ "shape":"String", - "documentation":"

Contains a glob pattern to match against the Reason returned for a job. The pattern can be up to 512 characters long, and can contain letters, numbers, periods (.), colons (:), and white space (including spaces and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + "documentation":"

Contains a glob pattern to match against the Reason returned for a job. The pattern can be up to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" }, "onExitCode":{ "shape":"String", - "documentation":"

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The pattern can be up to 512 characters long, can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + "documentation":"

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The pattern can be up to 512 characters in length. It can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" }, "action":{ "shape":"RetryAction", @@ -1312,10 +1312,10 @@ "members":{ "platformVersion":{ "shape":"String", - "documentation":"

The AWS Fargate platform version where the jobs are running. A platform version is specified only for jobs running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This uses a recent, approved version of the AWS Fargate platform for compute resources. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. For more information, see Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

The platform configuration for jobs running on Fargate resources. For jobs that run on EC2 resources, you shouldn't specify this parameter.

" + "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that run on EC2 resources must not specify this parameter.

" }, "Host":{ "type":"structure", @@ -1383,11 +1383,11 @@ }, "type":{ "shape":"String", - "documentation":"

The type of job definition. If the job is run on Fargate resources, then multinode isn't supported. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the AWS Batch User Guide.

" + "documentation":"

The type of job definition. If the job is run on Fargate resources, then multinode isn't supported. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the Batch User Guide.

" }, "parameters":{ "shape":"ParametersMap", - "documentation":"

Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from the job definition. For more information about specifying parameters, see Job Definition Parameters in the AWS Batch User Guide.

" + "documentation":"

Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from the job definition. For more information about specifying parameters, see Job Definition Parameters in the Batch User Guide.

" }, "retryStrategy":{ "shape":"RetryStrategy", @@ -1399,7 +1399,7 @@ }, "timeout":{ "shape":"JobTimeout", - "documentation":"

The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout duration after which AWS Batch terminates your jobs if they haven't finished.

" + "documentation":"

The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout duration after which Batch terminates your jobs if they haven't finished.

" }, "nodeProperties":{ "shape":"NodeProperties", @@ -1418,7 +1418,7 @@ "documentation":"

The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. Jobs run on Fargate resources specify FARGATE.

" } }, - "documentation":"

An object representing an AWS Batch job definition.

" + "documentation":"

An object representing a Batch job definition.

" }, "JobDefinitionList":{ "type":"list", @@ -1436,14 +1436,14 @@ "members":{ "jobId":{ "shape":"String", - "documentation":"

The job ID of the AWS Batch job associated with this dependency.

" + "documentation":"

The job ID of the Batch job associated with this dependency.

" }, "type":{ "shape":"ArrayJobDependency", "documentation":"

The type of the job dependency.

" } }, - "documentation":"

An object representing an AWS Batch job dependency.

" + "documentation":"

An object representing a Batch job dependency.

" }, "JobDependencyList":{ "type":"list", @@ -1478,7 +1478,7 @@ }, "status":{ "shape":"JobStatus", - "documentation":"

The current status for the job.

If your jobs don't progress to STARTING, see Jobs Stuck in RUNNABLE Status in the troubleshooting section of the AWS Batch User Guide.

" + "documentation":"

The current status for the job.

If your jobs don't progress to STARTING, see Jobs Stuck in RUNNABLE Status in the troubleshooting section of the Batch User Guide.

" }, "attempts":{ "shape":"AttemptDetails", @@ -1526,7 +1526,7 @@ }, "nodeProperties":{ "shape":"NodeProperties", - "documentation":"

An object representing the node properties of a multi-node parallel job.

This isn't applicable to jobs running on Fargate resources.

" + "documentation":"

An object representing the node properties of a multi-node parallel job.

This isn't applicable to jobs that are running on Fargate resources.

" }, "arrayProperties":{ "shape":"ArrayPropertiesDetail", @@ -1549,7 +1549,7 @@ "documentation":"

The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. Jobs run on Fargate resources specify FARGATE.

" } }, - "documentation":"

An object representing an AWS Batch job.

" + "documentation":"

An object representing a Batch job.

" }, "JobDetailList":{ "type":"list", @@ -1595,10 +1595,10 @@ }, "tags":{ "shape":"TagrisTagsMap", - "documentation":"

The tags applied to the job queue. For more information, see Tagging your AWS Batch resources in AWS Batch User Guide.

" + "documentation":"

The tags applied to the job queue. For more information, see Tagging your Batch resources in the Batch User Guide.

" } }, - "documentation":"

An object representing the details of an AWS Batch job queue.

" + "documentation":"

An object representing the details of a Batch job queue.

" }, "JobQueueDetailList":{ "type":"list", @@ -1665,7 +1665,11 @@ }, "nodeProperties":{ "shape":"NodePropertiesSummary", - "documentation":"

The node properties for a single node in a job summary list.

This isn't applicable to jobs running on Fargate resources.

" + "documentation":"

The node properties for a single node in a job summary list.

This isn't applicable to jobs that are running on Fargate resources.

" + }, + "jobDefinition":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the job definition.

" } }, "documentation":"

An object representing summary details of a job.

" @@ -1679,7 +1683,7 @@ "members":{ "attemptDurationSeconds":{ "shape":"Integer", - "documentation":"

The time duration in seconds (measured from the job attempt's startedAt timestamp) after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.

" + "documentation":"

The time duration in seconds (measured from the job attempt's startedAt timestamp) after which Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.

" } }, "documentation":"

An object representing a job timeout configuration.

" @@ -1698,6 +1702,20 @@ }, "documentation":"

A key-value pair object.

" }, + "KeyValuesPair":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the filter. Filter names are case sensitive.

" + }, + "values":{ + "shape":"StringList", + "documentation":"

The filter values.

" + } + }, + "documentation":"

A filter name and value pair that's used to return a more specific list of results from a ListJobs API operation.

" + }, "LaunchTemplateSpecification":{ "type":"structure", "members":{ @@ -1711,17 +1729,17 @@ }, "version":{ "shape":"String", - "documentation":"

The version number of the launch template, $Latest, or $Default.

If the value is $Latest, the latest version of the launch template is used. If the value is $Default, the default version of the launch template is used.

After the compute environment is created, the launch template version used will not be changed, even if the $Default or $Latest version for the launch template is updated. To use a new launch template version, create a new compute environment, add the new compute environment to the existing job queue, remove the old compute environment from the job queue, and delete the old compute environment.

Default: $Default.

" + "documentation":"

The version number of the launch template, $Latest, or $Default.

If the value is $Latest, the latest version of the launch template is used. If the value is $Default, the default version of the launch template is used.

After the compute environment is created, the launch template version that's used isn't changed, even if the $Default or $Latest version for the launch template is updated. To use a new launch template version, create a new compute environment, add the new compute environment to the existing job queue, remove the old compute environment from the job queue, and delete the old compute environment.

Default: $Default.

" } }, - "documentation":"

An object representing a launch template associated with a compute resource. You must specify either the launch template ID or launch template name in the request, but not both.

If security groups are specified using both the securityGroupIds parameter of CreateComputeEnvironment and the launch template, the values in the securityGroupIds parameter of CreateComputeEnvironment will be used.

This object isn't applicable to jobs running on Fargate resources.

" + "documentation":"

An object representing a launch template associated with a compute resource. You must specify either the launch template ID or launch template name in the request, but not both.

If security groups are specified using both the securityGroupIds parameter of CreateComputeEnvironment and the launch template, the values in the securityGroupIds parameter of CreateComputeEnvironment will be used.

This object isn't applicable to jobs that are running on Fargate resources.

" }, "LinuxParameters":{ "type":"structure", "members":{ "devices":{ "shape":"DevicesList", - "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

" }, "initProcessEnabled":{ "shape":"Boolean", @@ -1729,23 +1747,27 @@ }, "sharedMemorySize":{ "shape":"Integer", - "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

" }, "tmpfs":{ "shape":"TmpfsList", - "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

" }, "maxSwap":{ "shape":"Integer", - "documentation":"

The total amount of swap memory (in MiB) a container can use. This parameter is translated to the --memory-swap option to docker run where the value is the sum of the container memory plus the maxSwap value. For more information, see --memory-swap details in the Docker documentation.

If a maxSwap value of 0 is specified, the container doesn't use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container doesn't use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

The total amount of swap memory (in MiB) a container can use. This parameter is translated to the --memory-swap option to docker run where the value is the sum of the container memory plus the maxSwap value. For more information, see --memory-swap details in the Docker documentation.

If a maxSwap value of 0 is specified, the container doesn't use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container doesn't use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

" }, "swappiness":{ "shape":"Integer", - "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping not to happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter isn't specified, a default value of 60 is used. If a value isn't specified for maxSwap then this parameter is ignored. If maxSwap is set to 0, the container doesn't use swap. This parameter maps to the --memory-swappiness option to docker run.

Consider the following when you use a per-container swap configuration.

  • Swap space must be enabled and allocated on the container instance for the containers to use.

    The Amazon ECS optimized AMIs don't have swap enabled by default. You must enable swap on the instance to use this feature. For more information, see Instance Store Swap Volumes in the Amazon EC2 User Guide for Linux Instances or How do I allocate memory to work as swap space in an Amazon EC2 instance by using a swap file?

  • The swap space parameters are only supported for job definitions using EC2 resources.

  • If the maxSwap and swappiness parameters are omitted from a job definition, each container will have a default swappiness value of 60, and the total swap usage will be limited to two times the memory reservation of the container.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping not to happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter isn't specified, a default value of 60 is used. If a value isn't specified for maxSwap, then this parameter is ignored. If maxSwap is set to 0, the container doesn't use swap. This parameter maps to the --memory-swappiness option to docker run.

Consider the following when you use a per-container swap configuration.

  • Swap space must be enabled and allocated on the container instance for the containers to use.

    The Amazon ECS optimized AMIs don't have swap enabled by default. You must enable swap on the instance to use this feature. For more information, see Instance Store Swap Volumes in the Amazon EC2 User Guide for Linux Instances or How do I allocate memory to work as swap space in an Amazon EC2 instance by using a swap file?

  • The swap space parameters are only supported for job definitions using EC2 resources.

  • If the maxSwap and swappiness parameters are omitted from a job definition, each container will have a default swappiness value of 60, and the total swap usage will be limited to two times the memory reservation of the container.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

" } }, "documentation":"

Linux-specific modifications that are applied to the container, such as details for device mappings.

" }, + "ListJobsFilterList":{ + "type":"list", + "member":{"shape":"KeyValuesPair"} + }, "ListJobsRequest":{ "type":"structure", "members":{ @@ -1763,7 +1785,7 @@ }, "jobStatus":{ "shape":"JobStatus", - "documentation":"

The job status used to filter jobs in the specified queue. If you don't specify a status, only RUNNING jobs are returned.

" + "documentation":"

The job status used to filter jobs in the specified queue. If the filters parameter is specified, the jobStatus parameter is ignored and jobs with any status are returned. If you don't specify a status, only RUNNING jobs are returned.

" }, "maxResults":{ "shape":"Integer", @@ -1772,6 +1794,10 @@ "nextToken":{ "shape":"String", "documentation":"

The nextToken value returned from a previous paginated ListJobs request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

This token should be treated as an opaque identifier that's only used to retrieve the next items in a list and not for other programmatic purposes.

" + }, + "filters":{ + "shape":"ListJobsFilterList", + "documentation":"

The filter to apply to the query. Only one filter can be used at a time. When the filter is used, jobStatus is ignored. The filter doesn't apply to child jobs in an array or multi-node parallel (MNP) jobs. The results are sorted by the createdAt field, with the most recent jobs being first.

JOB_NAME

The value of the filter is a case-insensitive match for the job name. If the value ends with an asterisk (*), the filter will match any job name that begins with the string before the '*'. This corresponds to the jobName value. For example, test1 matches both Test1 and test1, and test1* matches both test1 and Test10. When the JOB_NAME filter is used, the results are grouped by the job name and version.

JOB_DEFINITION

The value for the filter is the name or Amazon Resource Name (ARN) of the job definition. This corresponds to the jobDefinition value. The value is case sensitive. When the value for the filter is the job definition name, the results include all the jobs that used any revision of that job definition name. If the value ends with an asterisk (*), the filter will match any job definition name that begins with the string before the '*'. For example, jd1 matches only jd1, and jd1* matches both jd1 and jd1A. The version of the job definition that's used doesn't affect the sort order. When the JOB_DEFINITION filter is used and the ARN is used (which is in the form arn:${Partition}:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision}), the results include jobs that used the specified revision of the job definition. Asterisk (*) is not supported when the ARN is used.

BEFORE_CREATED_AT

The value for the filter is the time that's before the job was created. This corresponds to the createdAt value. The value is a string representation of the number of seconds since 00:00:00 UTC (midnight) on January 1, 1970.

AFTER_CREATED_AT

The value for the filter is the time that's after the job was created. This corresponds to the createdAt value. The value is a string representation of the number of seconds since 00:00:00 UTC (midnight) on January 1, 1970.

" } }, "documentation":"

Contains the parameters for ListJobs.

" @@ -1796,7 +1822,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) that identifies the resource that tags are listed for. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource that tags are listed for. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "location":"uri", "locationName":"resourceArn" } @@ -1817,7 +1843,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

The supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

Jobs running on Fargate resources are restricted to the awslogs and splunk log drivers.

awslogs

Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs Log Driver in the AWS Batch User Guide and Amazon CloudWatch Logs logging driver in the Docker documentation.

fluentd

Specifies the Fluentd logging driver. For more information, including usage and options, see Fluentd logging driver in the Docker documentation.

gelf

Specifies the Graylog Extended Format (GELF) logging driver. For more information, including usage and options, see Graylog Extended Format logging driver in the Docker documentation.

journald

Specifies the journald logging driver. For more information, including usage and options, see Journald logging driver in the Docker documentation.

json-file

Specifies the JSON file logging driver. For more information, including usage and options, see JSON File logging driver in the Docker documentation.

splunk

Specifies the Splunk logging driver. For more information, including usage and options, see Splunk logging driver in the Docker documentation.

syslog

Specifies the syslog logging driver. For more information, including usage and options, see Syslog logging driver in the Docker documentation.

If you have a custom driver that's not listed earlier that you want to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you want to have included. However, Amazon Web Services doesn't currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

The supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

Jobs that are running on Fargate resources are restricted to the awslogs and splunk log drivers.

awslogs

Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs Log Driver in the Batch User Guide and Amazon CloudWatch Logs logging driver in the Docker documentation.

fluentd

Specifies the Fluentd logging driver. For more information, including usage and options, see Fluentd logging driver in the Docker documentation.

gelf

Specifies the Graylog Extended Format (GELF) logging driver. For more information, including usage and options, see Graylog Extended Format logging driver in the Docker documentation.

journald

Specifies the journald logging driver. For more information, including usage and options, see Journald logging driver in the Docker documentation.

json-file

Specifies the JSON file logging driver. For more information, including usage and options, see JSON File logging driver in the Docker documentation.

splunk

Specifies the Splunk logging driver. For more information, including usage and options, see Splunk logging driver in the Docker documentation.

syslog

Specifies the syslog logging driver. For more information, including usage and options, see Syslog logging driver in the Docker documentation.

If you have a custom driver that's not listed earlier that you want to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you want to have included. However, Amazon Web Services doesn't currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -1825,7 +1851,7 @@ }, "secretOptions":{ "shape":"SecretList", - "documentation":"

The secrets to pass to the log configuration. For more information, see Specifying Sensitive Data in the AWS Batch User Guide.

" + "documentation":"

The secrets to pass to the log configuration. For more information, see Specifying Sensitive Data in the Batch User Guide.

" } }, "documentation":"

Log configuration options to send to a custom log driver for the container.

" @@ -1875,10 +1901,10 @@ "members":{ "assignPublicIp":{ "shape":"AssignPublicIp", - "documentation":"

Indicates whether the job should have a public IP address. For a job running on Fargate resources in a private subnet to send outbound traffic to the internet (for example, in order to pull container images), the private subnet requires a NAT gateway be attached to route requests to the internet. For more information, see Amazon ECS task networking. The default value is \"DISABLED\".

" + "documentation":"

Indicates whether the job should have a public IP address. For a job that is running on Fargate resources in a private subnet to send outbound traffic to the internet (for example, to pull container images), the private subnet requires a NAT gateway be attached to route requests to the internet. For more information, see Amazon ECS task networking. The default value is \"DISABLED\".

" } }, - "documentation":"

The network configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" }, "NetworkInterface":{ "type":"structure", @@ -1928,7 +1954,7 @@ "documentation":"

The node property overrides for the job.

" } }, - "documentation":"

Object representing any node overrides to a job definition that's used in a SubmitJob API operation.

This isn't applicable to jobs running on Fargate resources and shouldn't be provided; use containerOverrides instead.

" + "documentation":"

Object representing any node overrides to a job definition that's used in a SubmitJob API operation.

This isn't applicable to jobs that are running on Fargate resources and shouldn't be provided; use containerOverrides instead.

" }, "NodeProperties":{ "type":"structure", @@ -2038,7 +2064,7 @@ }, "type":{ "shape":"JobDefinitionType", - "documentation":"

The type of job definition. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the AWS Batch User Guide.

If the job is run on Fargate resources, then multinode isn't supported.

" + "documentation":"

The type of job definition. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the Batch User Guide.

If the job is run on Fargate resources, then multinode isn't supported.

" }, "parameters":{ "shape":"ParametersMap", @@ -2050,7 +2076,7 @@ }, "nodeProperties":{ "shape":"NodeProperties", - "documentation":"

An object with various properties specific to multi-node parallel jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For more information, see Multi-node Parallel Jobs in the AWS Batch User Guide. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties.

If the job runs on Fargate resources, then you must not specify nodeProperties; use containerProperties instead.

" + "documentation":"

An object with various properties specific to multi-node parallel jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For more information, see Multi-node Parallel Jobs in the Batch User Guide. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties.

If the job runs on Fargate resources, then you must not specify nodeProperties; use containerProperties instead.

" }, "retryStrategy":{ "shape":"RetryStrategy", @@ -2062,11 +2088,11 @@ }, "timeout":{ "shape":"JobTimeout", - "documentation":"

The timeout configuration for jobs that are submitted with this job definition, after which AWS Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds. Any timeout configuration that's specified during a SubmitJob operation overrides the timeout configuration defined here. For more information, see Job Timeouts in the AWS Batch User Guide.

" + "documentation":"

The timeout configuration for jobs that are submitted with this job definition, after which Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds. Any timeout configuration that's specified during a SubmitJob operation overrides the timeout configuration defined here. For more information, see Job Timeouts in the Batch User Guide.

" }, "tags":{ "shape":"TagrisTagsMap", - "documentation":"

The tags that you apply to the job definition to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS Batch User Guide.

" + "documentation":"

The tags that you apply to the job definition to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in the Batch User Guide.

" }, "platformCapabilities":{ "shape":"PlatformCapabilityList", @@ -2106,7 +2132,7 @@ "members":{ "value":{ "shape":"String", - "documentation":"

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs running on Fargate resources.

type=\"MEMORY\"

The memory hard limit (in MiB) present to the container. This parameter is supported for jobs running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

For jobs running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5, or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

VCPU = 2 or 4

value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

" + "documentation":"

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs that are running on Fargate resources.

type=\"MEMORY\"

The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the Batch User Guide.

For jobs that are running on Fargate resources, value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5 or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

VCPU = 2 or 4

value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs that are running on Fargate resources, value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

" }, "type":{ "shape":"ResourceType", @@ -2146,7 +2172,7 @@ "documentation":"

Array of up to 5 objects that specify conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified.

" } }, - "documentation":"

The retry strategy associated with a job. For more information, see Automated job retries in the AWS Batch User Guide.

" + "documentation":"

The retry strategy associated with a job. For more information, see Automated job retries in the Batch User Guide.

" }, "Secret":{ "type":"structure", @@ -2161,10 +2187,10 @@ }, "valueFrom":{ "shape":"String", - "documentation":"

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the job you're launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" + "documentation":"

The secret to expose to the container. The supported values are either the full ARN of the Secrets Manager secret or the full ARN of the parameter in the Amazon Web Services Systems Manager Parameter Store.

If the Amazon Web Services Systems Manager Parameter Store parameter exists in the same Region as the job you're launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" } }, - "documentation":"

An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:

  • To inject sensitive data into your containers as environment variables, use the secrets container definition parameter.

  • To reference sensitive information in the log configuration of a container, use the secretOptions container definition parameter.

For more information, see Specifying sensitive data in the AWS Batch User Guide.

" + "documentation":"

An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:

  • To inject sensitive data into your containers as environment variables, use the secrets container definition parameter.

  • To reference sensitive information in the log configuration of a container, use the secretOptions container definition parameter.

For more information, see Specifying sensitive data in the Batch User Guide.

" }, "SecretList":{ "type":"list", @@ -2203,7 +2229,7 @@ }, "arrayProperties":{ "shape":"ArrayProperties", - "documentation":"

The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. For more information, see Array Jobs in the AWS Batch User Guide.

" + "documentation":"

The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. For more information, see Array Jobs in the Batch User Guide.

" }, "dependsOn":{ "shape":"JobDependencyList", @@ -2223,7 +2249,7 @@ }, "nodeOverrides":{ "shape":"NodeOverrides", - "documentation":"

A list of node overrides in JSON format that specify the node range to target and the container overrides for that node range.

This parameter isn't applicable to jobs running on Fargate resources; use containerOverrides instead.

" + "documentation":"

A list of node overrides in JSON format that specify the node range to target and the container overrides for that node range.

This parameter isn't applicable to jobs that are running on Fargate resources; use containerOverrides instead.

" }, "retryStrategy":{ "shape":"RetryStrategy", @@ -2235,11 +2261,11 @@ }, "timeout":{ "shape":"JobTimeout", - "documentation":"

The timeout configuration for this SubmitJob operation. You can specify a timeout duration after which AWS Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job. For more information, see Job Timeouts in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The timeout configuration for this SubmitJob operation. You can specify a timeout duration after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job. For more information, see Job Timeouts in the Amazon Elastic Container Service Developer Guide.

" }, "tags":{ "shape":"TagrisTagsMap", - "documentation":"

The tags that you apply to the job request to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General Reference.

" + "documentation":"

The tags that you apply to the job request to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference.

" } }, "documentation":"

Contains the parameters for SubmitJob.

" @@ -2285,13 +2311,13 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the resource that tags are added to. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource that tags are added to. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "location":"uri", "locationName":"resourceArn" }, "tags":{ "shape":"TagrisTagsMap", - "documentation":"

The tags that you apply to the resource to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General Reference.

" + "documentation":"

The tags that you apply to the resource to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference.

" } } }, @@ -2325,11 +2351,11 @@ "members":{ "jobId":{ "shape":"String", - "documentation":"

The AWS Batch job ID of the job to terminate.

" + "documentation":"

The Batch job ID of the job to terminate.

" }, "reason":{ "shape":"String", - "documentation":"

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the AWS Batch activity logs.

" + "documentation":"

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs.

" } }, "documentation":"

Contains the parameters for TerminateJob.

" @@ -2359,7 +2385,7 @@ "documentation":"

The list of tmpfs volume mount options.

Valid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" | \"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" | \"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" | \"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" | \"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\" | \"mode\" | \"uid\" | \"gid\" | \"nr_inodes\" | \"nr_blocks\" | \"mpol\"

" } }, - "documentation":"

The container path, mount options, and size of the tmpfs mount.

This object isn't applicable to jobs running on Fargate resources.

" + "documentation":"

The container path, mount options, and size of the tmpfs mount.

This object isn't applicable to jobs that are running on Fargate resources.

" }, "TmpfsList":{ "type":"list", @@ -2386,7 +2412,7 @@ "documentation":"

The soft limit for the ulimit type.

" } }, - "documentation":"

The ulimit settings to pass to the container.

This object isn't applicable to jobs running on Fargate resources.

" + "documentation":"

The ulimit settings to pass to the container.

This object isn't applicable to jobs that are running on Fargate resources.

" }, "Ulimits":{ "type":"list", @@ -2401,7 +2427,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the resource from which to delete tags. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource from which to delete tags. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "location":"uri", "locationName":"resourceArn" }, @@ -2428,15 +2454,15 @@ }, "state":{ "shape":"CEState", - "documentation":"

The state of the compute environment. Compute environments in the ENABLED state can accept jobs from a queue and scale in or out automatically based on the workload demand of its associated queues.

If the state is ENABLED, then the AWS Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand.

If the state is DISABLED, then the AWS Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state don't scale out. However, they scale in to minvCpus value after instances become idle.

" + "documentation":"

The state of the compute environment. Compute environments in the ENABLED state can accept jobs from a queue and scale in or out automatically based on the workload demand of its associated queues.

If the state is ENABLED, then the Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand.

If the state is DISABLED, then the Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state don't scale out. However, they scale in to the minvCpus value after instances become idle.

" }, "computeResources":{ "shape":"ComputeResourceUpdate", - "documentation":"

Details of the compute resources managed by the compute environment. Required for a managed compute environment. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

Details of the compute resources managed by the compute environment. Required for a managed compute environment. For more information, see Compute Environments in the Batch User Guide.

" }, "serviceRole":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If the compute environment has a service-linked role, it cannot be changed to use a regular IAM role. If the compute environment has a regular IAM role, it cannot be changed to use a service-linked role.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For more information, see Batch service IAM role in the Batch User Guide.

If the compute environment has a service-linked role, it can't be changed to use a regular IAM role. Likewise, if the compute environment has a regular IAM role, it can't be changed to use a service-linked role.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" } }, "documentation":"

Contains the parameters for UpdateComputeEnvironment.

" @@ -2472,7 +2498,7 @@ }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", - "documentation":"

Details the set of compute environments mapped to a job queue and their order relative to each other. This is one of the parameters used by the job scheduler to determine which compute environment should run a given job. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.

" + "documentation":"

Details the set of compute environments mapped to a job queue and their order relative to each other. This is one of the parameters used by the job scheduler to determine which compute environment should run a given job. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. Batch doesn't support mixing compute environment architecture types in a single job queue.

" } }, "documentation":"

Contains the parameters for UpdateJobQueue.

" @@ -2495,7 +2521,7 @@ "members":{ "host":{ "shape":"Host", - "documentation":"

The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers associated with it stop running.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers associated with it stop running.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

" }, "name":{ "shape":"String", @@ -2503,7 +2529,7 @@ }, "efsVolumeConfiguration":{ "shape":"EFSVolumeConfiguration", - "documentation":"

This parameter is specified when you are using an Amazon Elastic File System file system for job storage. Jobs running on Fargate resources must specify a platformVersion of at least 1.4.0.

" + "documentation":"

This parameter is specified when you are using an Amazon Elastic File System file system for job storage. Jobs that are running on Fargate resources must specify a platformVersion of at least 1.4.0.

" } }, "documentation":"

A data volume used in a job's container properties.

" @@ -2513,5 +2539,5 @@ "member":{"shape":"Volume"} } }, - "documentation":"

Using AWS Batch, you can run batch computing workloads on the AWS Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. AWS Batch uses the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these advantages, AWS Batch can help you to efficiently provision resources in response to jobs submitted, thus effectively helping you to eliminate capacity constraints, reduce compute costs, and deliver your results more quickly.

As a fully managed service, AWS Batch can run batch computing workloads of any scale. AWS Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With AWS Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

" + "documentation":"Batch

Using Batch, you can run batch computing workloads on the Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. Batch uses the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these advantages, Batch can help you to efficiently provision resources in response to jobs submitted, thus effectively helping you to eliminate capacity constraints, reduce compute costs, and deliver your results more quickly.

As a fully managed service, Batch can run batch computing workloads of any scale. Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

" } diff --git a/services/braket/pom.xml b/services/braket/pom.xml index 453a1c643a16..638a7518e9c4 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 9def71a86fb5..8c14d3299644 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chime/pom.xml b/services/chime/pom.xml index ce1fac8faf31..3d4539966a11 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chime/src/main/resources/codegen-resources/service-2.json b/services/chime/src/main/resources/codegen-resources/service-2.json index f4aff9525b9e..aa2011767487 100644 --- a/services/chime/src/main/resources/codegen-resources/service-2.json +++ b/services/chime/src/main/resources/codegen-resources/service-2.json @@ -3229,6 +3229,49 @@ "documentation":"

Sends a message to a particular channel that the member is a part of.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

Also, STANDARD messages can contain 4KB of data and 1KB of metadata. CONTROL messages can contain 30 bytes of data and no metadata.

", "endpoint":{"hostPrefix":"messaging-"} }, + "StartMeetingTranscription":{ + "name":"StartMeetingTranscription", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/transcription?operation=start", + "responseCode":200 + }, + "input":{"shape":"StartMeetingTranscriptionRequest"}, + "output":{"shape":"StartMeetingTranscriptionResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"UnprocessableEntityException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Start transcription for the specified meetingId.

" + }, + "StopMeetingTranscription":{ + "name":"StopMeetingTranscription", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/transcription?operation=stop", + "responseCode":200 + }, + "input":{"shape":"StopMeetingTranscriptionRequest"}, + "output":{"shape":"StopMeetingTranscriptionResponse"}, + "errors":[ + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"UnprocessableEntityException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Stops transcription for the specified meetingId.

" + }, "TagAttendee":{ "name":"TagAttendee", "http":{ @@ -3817,6 +3860,10 @@ "shape":"LicenseList", "documentation":"

Supported licenses for the Amazon Chime account.

" }, + "AccountStatus":{ + "shape":"AccountStatus", + "documentation":"

The status of the account.

" + }, "SigninDelegateGroups":{ "shape":"SigninDelegateGroupList", "documentation":"

The sign-in delegate groups associated with the account.

" @@ -3848,6 +3895,13 @@ }, "documentation":"

Settings related to the Amazon Chime account. This includes settings that start or stop remote control of shared screens, or start or stop the dial-out option in the Amazon Chime web application. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" }, + "AccountStatus":{ + "type":"string", + "enum":[ + "Suspended", + "Active" + ] + }, "AccountType":{ "type":"string", "enum":[ @@ -6867,6 +6921,64 @@ }, "documentation":"

The emergency calling configuration details associated with an Amazon Chime Voice Connector.

" }, + "EngineTranscribeMedicalSettings":{ + "type":"structure", + "required":[ + "LanguageCode", + "Specialty", + "Type" + ], + "members":{ + "LanguageCode":{ + "shape":"TranscribeMedicalLanguageCode", + "documentation":"

The language code specified for the Amazon Transcribe Medical engine.

" + }, + "Specialty":{ + "shape":"TranscribeMedicalSpecialty", + "documentation":"

The specialty specified for the Amazon Transcribe Medical engine.

" + }, + "Type":{ + "shape":"TranscribeMedicalType", + "documentation":"

The type of transcription.

" + }, + "VocabularyName":{ + "shape":"String", + "documentation":"

The name of the vocabulary passed to Amazon Transcribe Medical.

" + }, + "Region":{ + "shape":"TranscribeMedicalRegion", + "documentation":"

The AWS Region passed to Amazon Transcribe Medical. If you don't specify a Region, Amazon Chime uses the Region closest to the meeting's Region.

" + } + }, + "documentation":"

Settings specific to the Amazon Transcribe Medical engine.

" + }, + "EngineTranscribeSettings":{ + "type":"structure", + "required":["LanguageCode"], + "members":{ + "LanguageCode":{ + "shape":"TranscribeLanguageCode", + "documentation":"

The language code specified for the Amazon Transcribe engine.

" + }, + "VocabularyFilterMethod":{ + "shape":"TranscribeVocabularyFilterMethod", + "documentation":"

The filtering method passed to Amazon Transcribe.

" + }, + "VocabularyFilterName":{ + "shape":"String", + "documentation":"

The name of the vocabulary filter passed to Amazon Transcribe.

" + }, + "VocabularyName":{ + "shape":"String", + "documentation":"

The name of the vocabulary passed to Amazon Transcribe.

" + }, + "Region":{ + "shape":"TranscribeRegion", + "documentation":"

The AWS Region passed to Amazon Transcribe. If you don't specify a Region, Amazon Chime uses the Region closest to the meeting's Region.

" + } + }, + "documentation":"

Settings specific to the Amazon Transcribe engine.

" + }, "ErrorCode":{ "type":"string", "enum":[ @@ -9074,10 +9186,10 @@ }, "EventIngestionUrl":{ "shape":"UriType", - "documentation":"

The event ingestion URL.

" + "documentation":"

The URL of the S3 bucket used to store the captured media.

" } }, - "documentation":"

A set of endpoints used by clients to connect to the media service group for a Amazon Chime SDK meeting.

" + "documentation":"

A set of endpoints used by clients to connect to the media service group for an Amazon Chime SDK meeting.

" }, "Meeting":{ "type":"structure", @@ -10767,6 +10879,47 @@ "DESCENDING" ] }, + "StartMeetingTranscriptionRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "TranscriptionConfiguration" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The unique ID of the meeting being transcribed.

", + "location":"uri", + "locationName":"meetingId" + }, + "TranscriptionConfiguration":{ + "shape":"TranscriptionConfiguration", + "documentation":"

The configuration for the current transcription operation. Must contain EngineTranscribeSettings or EngineTranscribeMedicalSettings.

" + } + } + }, + "StartMeetingTranscriptionResponse":{ + "type":"structure", + "members":{ + } + }, + "StopMeetingTranscriptionRequest":{ + "type":"structure", + "required":["MeetingId"], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The unique ID of the meeting for which you stop transcription.

", + "location":"uri", + "locationName":"meetingId" + } + } + }, + "StopMeetingTranscriptionResponse":{ + "type":"structure", + "members":{ + } + }, "StreamingConfiguration":{ "type":"structure", "required":["DataRetentionInHours"], @@ -10996,6 +11149,94 @@ "min":3, "pattern":"^8(00|33|44|55|66|77|88)$" }, + "TranscribeLanguageCode":{ + "type":"string", + "enum":[ + "en-US", + "en-GB", + "es-US", + "fr-CA", + "fr-FR", + "en-AU", + "it-IT", + "de-DE", + "pt-BR", + "ja-JP", + "ko-KR", + "zh-CN" + ] + }, + "TranscribeMedicalLanguageCode":{ + "type":"string", + "enum":["en-US"] + }, + "TranscribeMedicalRegion":{ + "type":"string", + "enum":[ + "us-east-1", + "us-east-2", + "us-west-2", + "ap-southeast-2", + "ca-central-1", + "eu-west-1" + ] + }, + "TranscribeMedicalSpecialty":{ + "type":"string", + "enum":[ + "PRIMARYCARE", + "CARDIOLOGY", + "NEUROLOGY", + "ONCOLOGY", + "RADIOLOGY", + "UROLOGY" + ] + }, + "TranscribeMedicalType":{ + "type":"string", + "enum":[ + "CONVERSATION", + "DICTATION" + ] + }, + "TranscribeRegion":{ + "type":"string", + "enum":[ + "us-east-2", + "us-east-1", + "us-west-2", + "ap-northeast-2", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "sa-east-1" + ] + }, + "TranscribeVocabularyFilterMethod":{ + "type":"string", + "enum":[ + "remove", + "mask", + "tag" + ] + }, + "TranscriptionConfiguration":{ + "type":"structure", + "members":{ + "EngineTranscribeSettings":{ + "shape":"EngineTranscribeSettings", + "documentation":"

The transcription configuration settings passed to Amazon Transcribe.

" + }, + "EngineTranscribeMedicalSettings":{ + "shape":"EngineTranscribeMedicalSettings", + "documentation":"

The transcription configuration settings passed to Amazon Transcribe.

" + } + }, + "documentation":"

The configuration for the current transcription operation. Must contain EngineTranscribeSettings or EngineTranscribeMedicalSettings.

" + }, "UnauthorizedClientException":{ "type":"structure", "members":{ diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml new file mode 100644 index 000000000000..4707b820a039 --- /dev/null +++ b/services/chimesdkidentity/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.17.16-SNAPSHOT + + chimesdkidentity + AWS Java SDK :: Services :: Chime SDK Identity + The AWS Java SDK for Chime SDK Identity module holds the client classes that are used for + communicating with Chime SDK Identity. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.chimesdkidentity + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/chimesdkidentity/src/main/resources/codegen-resources/paginators-1.json b/services/chimesdkidentity/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..03ee29de6c81 --- /dev/null +++ b/services/chimesdkidentity/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,19 @@ +{ + "pagination": { + "ListAppInstanceAdmins": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListAppInstanceUsers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListAppInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/chimesdkidentity/src/main/resources/codegen-resources/service-2.json b/services/chimesdkidentity/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..a34e43eed93c --- /dev/null +++ b/services/chimesdkidentity/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1161 @@ +{ + "version":"2.0", + "metadata":{ + 
"apiVersion":"2021-04-20", + "endpointPrefix":"identity-chime", + "protocol":"rest-json", + "serviceFullName":"Amazon Chime SDK Identity", + "serviceId":"Chime SDK Identity", + "signatureVersion":"v4", + "signingName":"chime", + "uid":"chime-sdk-identity-2021-04-20" + }, + "operations":{ + "CreateAppInstance":{ + "name":"CreateAppInstance", + "http":{ + "method":"POST", + "requestUri":"/app-instances", + "responseCode":201 + }, + "input":{"shape":"CreateAppInstanceRequest"}, + "output":{"shape":"CreateAppInstanceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates an Amazon Chime SDK messaging AppInstance under an AWS account. Only SDK messaging customers use this API. CreateAppInstance supports idempotency behavior as described in the AWS API Standard.

identity

" + }, + "CreateAppInstanceAdmin":{ + "name":"CreateAppInstanceAdmin", + "http":{ + "method":"POST", + "requestUri":"/app-instances/{appInstanceArn}/admins", + "responseCode":201 + }, + "input":{"shape":"CreateAppInstanceAdminRequest"}, + "output":{"shape":"CreateAppInstanceAdminResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Promotes an AppInstanceUser to an AppInstanceAdmin. The promoted user can perform the following actions.

  • ChannelModerator actions across all channels in the AppInstance.

  • DeleteChannelMessage actions.

Only an AppInstanceUser can be promoted to an AppInstanceAdmin role.

" + }, + "CreateAppInstanceUser":{ + "name":"CreateAppInstanceUser", + "http":{ + "method":"POST", + "requestUri":"/app-instance-users", + "responseCode":201 + }, + "input":{"shape":"CreateAppInstanceUserRequest"}, + "output":{"shape":"CreateAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a user under an Amazon Chime AppInstance. The request consists of a unique appInstanceUserId and Name for that user.

" + }, + "DeleteAppInstance":{ + "name":"DeleteAppInstance", + "http":{ + "method":"DELETE", + "requestUri":"/app-instances/{appInstanceArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteAppInstanceRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes an AppInstance and all associated data asynchronously.

" + }, + "DeleteAppInstanceAdmin":{ + "name":"DeleteAppInstanceAdmin", + "http":{ + "method":"DELETE", + "requestUri":"/app-instances/{appInstanceArn}/admins/{appInstanceAdminArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteAppInstanceAdminRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Demotes an AppInstanceAdmin to an AppInstanceUser. This action does not delete the user.

" + }, + "DeleteAppInstanceUser":{ + "name":"DeleteAppInstanceUser", + "http":{ + "method":"DELETE", + "requestUri":"/app-instance-users/{appInstanceUserArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteAppInstanceUserRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes an AppInstanceUser.

" + }, + "DescribeAppInstance":{ + "name":"DescribeAppInstance", + "http":{ + "method":"GET", + "requestUri":"/app-instances/{appInstanceArn}" + }, + "input":{"shape":"DescribeAppInstanceRequest"}, + "output":{"shape":"DescribeAppInstanceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of an AppInstance.

" + }, + "DescribeAppInstanceAdmin":{ + "name":"DescribeAppInstanceAdmin", + "http":{ + "method":"GET", + "requestUri":"/app-instances/{appInstanceArn}/admins/{appInstanceAdminArn}", + "responseCode":200 + }, + "input":{"shape":"DescribeAppInstanceAdminRequest"}, + "output":{"shape":"DescribeAppInstanceAdminResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of an AppInstanceAdmin.

" + }, + "DescribeAppInstanceUser":{ + "name":"DescribeAppInstanceUser", + "http":{ + "method":"GET", + "requestUri":"/app-instance-users/{appInstanceUserArn}" + }, + "input":{"shape":"DescribeAppInstanceUserRequest"}, + "output":{"shape":"DescribeAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of an AppInstanceUser.

" + }, + "GetAppInstanceRetentionSettings":{ + "name":"GetAppInstanceRetentionSettings", + "http":{ + "method":"GET", + "requestUri":"/app-instances/{appInstanceArn}/retention-settings", + "responseCode":200 + }, + "input":{"shape":"GetAppInstanceRetentionSettingsRequest"}, + "output":{"shape":"GetAppInstanceRetentionSettingsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Gets the retention settings for an AppInstance.

" + }, + "ListAppInstanceAdmins":{ + "name":"ListAppInstanceAdmins", + "http":{ + "method":"GET", + "requestUri":"/app-instances/{appInstanceArn}/admins", + "responseCode":200 + }, + "input":{"shape":"ListAppInstanceAdminsRequest"}, + "output":{"shape":"ListAppInstanceAdminsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns a list of the administrators in the AppInstance.

" + }, + "ListAppInstanceUsers":{ + "name":"ListAppInstanceUsers", + "http":{ + "method":"GET", + "requestUri":"/app-instance-users" + }, + "input":{"shape":"ListAppInstanceUsersRequest"}, + "output":{"shape":"ListAppInstanceUsersResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists all AppInstanceUsers created under a single AppInstance.

" + }, + "ListAppInstances":{ + "name":"ListAppInstances", + "http":{ + "method":"GET", + "requestUri":"/app-instances" + }, + "input":{"shape":"ListAppInstancesRequest"}, + "output":{"shape":"ListAppInstancesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists all Amazon Chime AppInstances created under a single AWS account.

" + }, + "PutAppInstanceRetentionSettings":{ + "name":"PutAppInstanceRetentionSettings", + "http":{ + "method":"PUT", + "requestUri":"/app-instances/{appInstanceArn}/retention-settings", + "responseCode":200 + }, + "input":{"shape":"PutAppInstanceRetentionSettingsRequest"}, + "output":{"shape":"PutAppInstanceRetentionSettingsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Sets the amount of time in days that a given AppInstance retains data.

" + }, + "UpdateAppInstance":{ + "name":"UpdateAppInstance", + "http":{ + "method":"PUT", + "requestUri":"/app-instances/{appInstanceArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateAppInstanceRequest"}, + "output":{"shape":"UpdateAppInstanceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates AppInstance metadata.

" + }, + "UpdateAppInstanceUser":{ + "name":"UpdateAppInstanceUser", + "http":{ + "method":"PUT", + "requestUri":"/app-instance-users/{appInstanceUserArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateAppInstanceUserRequest"}, + "output":{"shape":"UpdateAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the details of an AppInstanceUser. You can update names and metadata.

" + } + }, + "shapes":{ + "AppInstance":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the messaging instance.

" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of an AppInstance.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which an AppInstance was created. In epoch milliseconds.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time an AppInstance was last updated. In epoch milliseconds.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of an AppInstance.

" + } + }, + "documentation":"

The details of an AppInstance, an instance of an Amazon Chime SDK messaging application.

" + }, + "AppInstanceAdmin":{ + "type":"structure", + "members":{ + "Admin":{ + "shape":"Identity", + "documentation":"

The AppInstanceAdmin data.

" + }, + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance for which the user is an administrator.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which an administrator was created.

" + } + }, + "documentation":"

The details of an AppInstanceAdmin.

" + }, + "AppInstanceAdminList":{ + "type":"list", + "member":{"shape":"AppInstanceAdminSummary"} + }, + "AppInstanceAdminSummary":{ + "type":"structure", + "members":{ + "Admin":{ + "shape":"Identity", + "documentation":"

The details of the AppInstanceAdmin.

" + } + }, + "documentation":"

Summary of the details of an AppInstanceAdmin.

" + }, + "AppInstanceList":{ + "type":"list", + "member":{"shape":"AppInstanceSummary"} + }, + "AppInstanceRetentionSettings":{ + "type":"structure", + "members":{ + "ChannelRetentionSettings":{ + "shape":"ChannelRetentionSettings", + "documentation":"

The length of time in days to retain the messages in a channel.

" + } + }, + "documentation":"

The details of the data-retention settings for an AppInstance.

" + }, + "AppInstanceSummary":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The AppInstance ARN.

" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the AppInstance.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the AppInstance.

" + } + }, + "documentation":"

Summary of the data for an AppInstance.

" + }, + "AppInstanceUser":{ + "type":"structure", + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceUser.

" + }, + "Name":{ + "shape":"UserName", + "documentation":"

The name of the AppInstanceUser.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the AppInstanceUser.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the AppInstanceUser was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the AppInstanceUser was last updated.

" + } + }, + "documentation":"

The details of an AppInstanceUser.

" + }, + "AppInstanceUserList":{ + "type":"list", + "member":{"shape":"AppInstanceUserSummary"} + }, + "AppInstanceUserSummary":{ + "type":"structure", + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceUser.

" + }, + "Name":{ + "shape":"UserName", + "documentation":"

The name of an AppInstanceUser.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the AppInstanceUser.

" + } + }, + "documentation":"

Summary of the details of an AppInstanceUser.

" + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The input parameters don't match the service's restrictions.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ChannelRetentionSettings":{ + "type":"structure", + "members":{ + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The time in days to retain the messages in a channel.

" + } + }, + "documentation":"

The details of the retention settings for a channel.

" + }, + "ChimeArn":{ + "type":"string", + "max":1600, + "min":5, + "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":2, + "pattern":"[-_a-zA-Z0-9]*", + "sensitive":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The request could not be processed because of conflict in the current state of the resource.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateAppInstanceAdminRequest":{ + "type":"structure", + "required":[ + "AppInstanceAdminArn", + "AppInstanceArn" + ], + "members":{ + "AppInstanceAdminArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the administrator of the current AppInstance.

" + }, + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } + }, + "CreateAppInstanceAdminResponse":{ + "type":"structure", + "members":{ + "AppInstanceAdmin":{ + "shape":"Identity", + "documentation":"

The name and ARN of the admin for the AppInstance.

" + }, + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the admin for the AppInstance.

" + } + } + }, + "CreateAppInstanceRequest":{ + "type":"structure", + "required":[ + "Name", + "ClientRequestToken" + ], + "members":{ + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the AppInstance.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the AppInstance. Limited to a 1KB string in UTF-8.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The ClientRequestToken of the AppInstance.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags assigned to the AppInstanceUser.

" + } + } + }, + "CreateAppInstanceResponse":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The Amazon Resource Number (ARN) of the AppInstance.

" + } + } + }, + "CreateAppInstanceUserRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "AppInstanceUserId", + "Name", + "ClientRequestToken" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance request.

" + }, + "AppInstanceUserId":{ + "shape":"UserId", + "documentation":"

The user ID of the AppInstance.

" + }, + "Name":{ + "shape":"UserName", + "documentation":"

The user's name.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The request's metadata. Limited to a 1KB string in UTF-8.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The token assigned to the user requesting an AppInstance.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags assigned to the AppInstanceUser.

" + } + } + }, + "CreateAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The user's ARN.

" + } + } + }, + "DeleteAppInstanceAdminRequest":{ + "type":"structure", + "required":[ + "AppInstanceAdminArn", + "AppInstanceArn" + ], + "members":{ + "AppInstanceAdminArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance's administrator.

", + "location":"uri", + "locationName":"appInstanceAdminArn" + }, + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } + }, + "DeleteAppInstanceRequest":{ + "type":"structure", + "required":["AppInstanceArn"], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } + }, + "DeleteAppInstanceUserRequest":{ + "type":"structure", + "required":["AppInstanceUserArn"], + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the user request being deleted.

", + "location":"uri", + "locationName":"appInstanceUserArn" + } + } + }, + "DescribeAppInstanceAdminRequest":{ + "type":"structure", + "required":[ + "AppInstanceAdminArn", + "AppInstanceArn" + ], + "members":{ + "AppInstanceAdminArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceAdmin.

", + "location":"uri", + "locationName":"appInstanceAdminArn" + }, + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } + }, + "DescribeAppInstanceAdminResponse":{ + "type":"structure", + "members":{ + "AppInstanceAdmin":{ + "shape":"AppInstanceAdmin", + "documentation":"

The ARN and name of the AppInstanceUser, the ARN of the AppInstance, and the created and last-updated timestamps. All timestamps use epoch milliseconds.

" + } + } + }, + "DescribeAppInstanceRequest":{ + "type":"structure", + "required":["AppInstanceArn"], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } + }, + "DescribeAppInstanceResponse":{ + "type":"structure", + "members":{ + "AppInstance":{ + "shape":"AppInstance", + "documentation":"

The ARN, metadata, created and last-updated timestamps, and the name of the AppInstance. All timestamps use epoch milliseconds.

" + } + } + }, + "DescribeAppInstanceUserRequest":{ + "type":"structure", + "required":["AppInstanceUserArn"], + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceUser.

", + "location":"uri", + "locationName":"appInstanceUserArn" + } + } + }, + "DescribeAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "AppInstanceUser":{ + "shape":"AppInstanceUser", + "documentation":"

The name of the AppInstanceUser.

" + } + } + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "BadRequest", + "Conflict", + "Forbidden", + "NotFound", + "PreconditionFailed", + "ResourceLimitExceeded", + "ServiceFailure", + "AccessDenied", + "ServiceUnavailable", + "Throttled", + "Throttling", + "Unauthorized", + "Unprocessable", + "VoiceConnectorGroupAssociationsExist", + "PhoneNumberAssociationsExist" + ] + }, + "ForbiddenException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The client is permanently forbidden from making the request.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "GetAppInstanceRetentionSettingsRequest":{ + "type":"structure", + "required":["AppInstanceArn"], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } + }, + "GetAppInstanceRetentionSettingsResponse":{ + "type":"structure", + "members":{ + "AppInstanceRetentionSettings":{ + "shape":"AppInstanceRetentionSettings", + "documentation":"

The retention settings for the AppInstance.

" + }, + "InitiateDeletionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp representing the time at which the specified items are retained, in Epoch Seconds.

" + } + } + }, + "Identity":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ChimeArn", + "documentation":"

The ARN in an Identity.

" + }, + "Name":{ + "shape":"ResourceName", + "documentation":"

The name in an Identity.

" + } + }, + "documentation":"

The details of a user.

" + }, + "ListAppInstanceAdminsRequest":{ + "type":"structure", + "required":["AppInstanceArn"], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of administrators that you want to return.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of administrators is reached.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListAppInstanceAdminsResponse":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

" + }, + "AppInstanceAdmins":{ + "shape":"AppInstanceAdminList", + "documentation":"

The information for each administrator.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of administrators is reached.

" + } + } + }, + "ListAppInstanceUsersRequest":{ + "type":"structure", + "required":["AppInstanceArn"], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"querystring", + "locationName":"app-instance-arn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of requests that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested users are returned.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListAppInstanceUsersResponse":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

" + }, + "AppInstanceUsers":{ + "shape":"AppInstanceUserList", + "documentation":"

The information for each requested AppInstanceUser.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested users are returned.

" + } + } + }, + "ListAppInstancesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of AppInstances that you want to return.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API requests until you reach the maximum number of AppInstances.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListAppInstancesResponse":{ + "type":"structure", + "members":{ + "AppInstances":{ + "shape":"AppInstanceList", + "documentation":"

The information for each AppInstance.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API requests until the maximum number of AppInstances is reached.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "Metadata":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*", + "sensitive":true + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":0, + "pattern":".*", + "sensitive":true + }, + "NonEmptyResourceName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*", + "sensitive":true + }, + "PutAppInstanceRetentionSettingsRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "AppInstanceRetentionSettings" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + }, + "AppInstanceRetentionSettings":{ + "shape":"AppInstanceRetentionSettings", + "documentation":"

The time in days to retain data. Data type: number.

" + } + } + }, + "PutAppInstanceRetentionSettingsResponse":{ + "type":"structure", + "members":{ + "AppInstanceRetentionSettings":{ + "shape":"AppInstanceRetentionSettings", + "documentation":"

The time in days to retain data. Data type: number.

" + }, + "InitiateDeletionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the API deletes data.

" + } + } + }, + "ResourceLimitExceededException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The request exceeds the resource limit.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ResourceName":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*", + "sensitive":true + }, + "RetentionDays":{ + "type":"integer", + "max":5475, + "min":1 + }, + "ServiceFailureException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The service encountered an unexpected error.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The service is currently unavailable.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the tag.

" + } + }, + "documentation":"

Describes a tag applied to a resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "sensitive":true + }, + "ThrottledClientException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The client exceeded its request rate limit.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UnauthorizedClientException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The client is not currently authorized to make the request.

", + "error":{"httpStatusCode":401}, + "exception":true + }, + "UpdateAppInstanceRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "Name", + "Metadata" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"uri", + "locationName":"appInstanceArn" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name that you want to change.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata that you want to change.

" + } + } + }, + "UpdateAppInstanceResponse":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

" + } + } + }, + "UpdateAppInstanceUserRequest":{ + "type":"structure", + "required":[ + "AppInstanceUserArn", + "Name", + "Metadata" + ], + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceUser.

", + "location":"uri", + "locationName":"appInstanceUserArn" + }, + "Name":{ + "shape":"UserName", + "documentation":"

The name of the AppInstanceUser.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the AppInstanceUser.

" + } + } + }, + "UpdateAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceUser.

" + } + } + }, + "UserId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9]([A-Za-z0-9\\:\\-\\_\\.\\@]{0,62}[A-Za-z0-9])?", + "sensitive":true + }, + "UserName":{ + "type":"string", + "max":100, + "min":1, + "pattern":".*\\S.*", + "sensitive":true + } + }, + "documentation":"

The Amazon Chime SDK Identity APIs in this section allow software developers to create and manage unique instances of their messaging applications. These APIs provide the overarching framework for creating and sending messages. For more information about the identity APIs, refer to the Amazon Chime SDK Identity API Reference.

" +} diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml new file mode 100644 index 000000000000..b9548d8931a9 --- /dev/null +++ b/services/chimesdkmessaging/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.17.16-SNAPSHOT + + chimesdkmessaging + AWS Java SDK :: Services :: Chime SDK Messaging + The AWS Java SDK for Chime SDK Messaging module holds the client classes that are used for + communicating with Chime SDK Messaging. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.chimesdkmessaging + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/chimesdkmessaging/src/main/resources/codegen-resources/paginators-1.json b/services/chimesdkmessaging/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..f17af841290c --- /dev/null +++ b/services/chimesdkmessaging/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,39 @@ +{ + "pagination": { + "ListChannelBans": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelMemberships": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelMembershipsForAppInstanceUser": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelMessages": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelModerators": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannels": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelsModeratedByAppInstanceUser": { + "input_token": "NextToken", + 
"output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/chimesdkmessaging/src/main/resources/codegen-resources/service-2.json b/services/chimesdkmessaging/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..d925bd060d45 --- /dev/null +++ b/services/chimesdkmessaging/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2477 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-05-15", + "endpointPrefix":"messaging-chime", + "protocol":"rest-json", + "serviceFullName":"Amazon Chime SDK Messaging", + "serviceId":"Chime SDK Messaging", + "signatureVersion":"v4", + "signingName":"chime", + "uid":"chime-sdk-messaging-2021-05-15" + }, + "operations":{ + "BatchCreateChannelMembership":{ + "name":"BatchCreateChannelMembership", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/memberships?operation=batch-create", + "responseCode":200 + }, + "input":{"shape":"BatchCreateChannelMembershipRequest"}, + "output":{"shape":"BatchCreateChannelMembershipResponse"}, + "errors":[ + {"shape":"ServiceFailureException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"} + ], + "documentation":"

Adds a specified number of users to a channel.

" + }, + "CreateChannel":{ + "name":"CreateChannel", + "http":{ + "method":"POST", + "requestUri":"/channels", + "responseCode":201 + }, + "input":{"shape":"CreateChannelRequest"}, + "output":{"shape":"CreateChannelResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a channel to which you can add users and send messages.

Restriction: You can't change a channel's privacy.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "CreateChannelBan":{ + "name":"CreateChannelBan", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/bans", + "responseCode":201 + }, + "input":{"shape":"CreateChannelBanRequest"}, + "output":{"shape":"CreateChannelBanResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Permanently bans a member from a channel. Moderators can't add banned members to a channel. To undo a ban, you first have to DeleteChannelBan, and then CreateChannelMembership. Bans are cleaned up when you delete users or channels.

If you ban a user who is already part of a channel, that user is automatically kicked from the channel.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "CreateChannelMembership":{ + "name":"CreateChannelMembership", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/memberships", + "responseCode":201 + }, + "input":{"shape":"CreateChannelMembershipRequest"}, + "output":{"shape":"CreateChannelMembershipResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds a user to a channel. The InvitedBy response field is derived from the request header. A channel member can:

  • List messages

  • Send messages

  • Receive messages

  • Edit their own messages

  • Leave the channel

Privacy settings impact this action as follows:

  • Public Channels: You do not need to be a member to list messages, but you must be a member to send messages.

  • Private Channels: You must be a member to list or send messages.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "CreateChannelModerator":{ + "name":"CreateChannelModerator", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/moderators", + "responseCode":201 + }, + "input":{"shape":"CreateChannelModeratorRequest"}, + "output":{"shape":"CreateChannelModeratorResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a new ChannelModerator. A channel moderator can:

  • Add and remove other members of the channel.

  • Add and remove other moderators of the channel.

  • Add and remove user bans for the channel.

  • Redact messages in the channel.

  • List messages in the channel.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DeleteChannel":{ + "name":"DeleteChannel", + "http":{ + "method":"DELETE", + "requestUri":"/channels/{channelArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteChannelRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Immediately makes a channel and its memberships inaccessible and marks them for deletion. This is an irreversible process.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DeleteChannelBan":{ + "name":"DeleteChannelBan", + "http":{ + "method":"DELETE", + "requestUri":"/channels/{channelArn}/bans/{memberArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteChannelBanRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes a user from a channel's ban list.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DeleteChannelMembership":{ + "name":"DeleteChannelMembership", + "http":{ + "method":"DELETE", + "requestUri":"/channels/{channelArn}/memberships/{memberArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteChannelMembershipRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes a member from a channel.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DeleteChannelMessage":{ + "name":"DeleteChannelMessage", + "http":{ + "method":"DELETE", + "requestUri":"/channels/{channelArn}/messages/{messageId}", + "responseCode":204 + }, + "input":{"shape":"DeleteChannelMessageRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes a channel message. Only admins can perform this action. Deletion makes messages inaccessible immediately. A background process deletes any revisions created by UpdateChannelMessage.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DeleteChannelModerator":{ + "name":"DeleteChannelModerator", + "http":{ + "method":"DELETE", + "requestUri":"/channels/{channelArn}/moderators/{channelModeratorArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteChannelModeratorRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes a channel moderator.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DescribeChannel":{ + "name":"DescribeChannel", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}", + "responseCode":200 + }, + "input":{"shape":"DescribeChannelRequest"}, + "output":{"shape":"DescribeChannelResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of a channel in an Amazon Chime AppInstance.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DescribeChannelBan":{ + "name":"DescribeChannelBan", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}/bans/{memberArn}", + "responseCode":200 + }, + "input":{"shape":"DescribeChannelBanRequest"}, + "output":{"shape":"DescribeChannelBanResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of a channel ban.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DescribeChannelMembership":{ + "name":"DescribeChannelMembership", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}/memberships/{memberArn}", + "responseCode":200 + }, + "input":{"shape":"DescribeChannelMembershipRequest"}, + "output":{"shape":"DescribeChannelMembershipResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of a user's channel membership.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DescribeChannelMembershipForAppInstanceUser":{ + "name":"DescribeChannelMembershipForAppInstanceUser", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}?scope=app-instance-user-membership", + "responseCode":200 + }, + "input":{"shape":"DescribeChannelMembershipForAppInstanceUserRequest"}, + "output":{"shape":"DescribeChannelMembershipForAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the details of a channel based on the membership of the specified AppInstanceUser.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DescribeChannelModeratedByAppInstanceUser":{ + "name":"DescribeChannelModeratedByAppInstanceUser", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}?scope=app-instance-user-moderated-channel", + "responseCode":200 + }, + "input":{"shape":"DescribeChannelModeratedByAppInstanceUserRequest"}, + "output":{"shape":"DescribeChannelModeratedByAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of a channel moderated by the specified AppInstanceUser.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "DescribeChannelModerator":{ + "name":"DescribeChannelModerator", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}/moderators/{channelModeratorArn}", + "responseCode":200 + }, + "input":{"shape":"DescribeChannelModeratorRequest"}, + "output":{"shape":"DescribeChannelModeratorResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of a single ChannelModerator.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "GetChannelMessage":{ + "name":"GetChannelMessage", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}/messages/{messageId}", + "responseCode":200 + }, + "input":{"shape":"GetChannelMessageRequest"}, + "output":{"shape":"GetChannelMessageResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Gets the full details of a channel message.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "GetMessagingSessionEndpoint":{ + "name":"GetMessagingSessionEndpoint", + "http":{ + "method":"GET", + "requestUri":"/endpoints/messaging-session", + "responseCode":200 + }, + "input":{"shape":"GetMessagingSessionEndpointRequest"}, + "output":{"shape":"GetMessagingSessionEndpointResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

The details of the endpoint for the messaging session.

" + }, + "ListChannelBans":{ + "name":"ListChannelBans", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}/bans", + "responseCode":200 + }, + "input":{"shape":"ListChannelBansRequest"}, + "output":{"shape":"ListChannelBansResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists all the users banned from a particular channel.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "ListChannelMemberships":{ + "name":"ListChannelMemberships", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}/memberships", + "responseCode":200 + }, + "input":{"shape":"ListChannelMembershipsRequest"}, + "output":{"shape":"ListChannelMembershipsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists all channel memberships in a channel.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "ListChannelMembershipsForAppInstanceUser":{ + "name":"ListChannelMembershipsForAppInstanceUser", + "http":{ + "method":"GET", + "requestUri":"/channels?scope=app-instance-user-memberships", + "responseCode":200 + }, + "input":{"shape":"ListChannelMembershipsForAppInstanceUserRequest"}, + "output":{"shape":"ListChannelMembershipsForAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists all channels that a particular AppInstanceUser is a part of. Only an AppInstanceAdmin can call the API with a user ARN that is not their own.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "ListChannelMessages":{ + "name":"ListChannelMessages", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}/messages", + "responseCode":200 + }, + "input":{"shape":"ListChannelMessagesRequest"}, + "output":{"shape":"ListChannelMessagesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

List all the messages in a channel. Returns a paginated list of ChannelMessages. By default, sorted by creation timestamp in descending order.

Redacted messages appear in the results as empty, since they are only redacted, not deleted. Deleted messages do not appear in the results. This action always returns the latest version of an edited message.

Also, the x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "ListChannelModerators":{ + "name":"ListChannelModerators", + "http":{ + "method":"GET", + "requestUri":"/channels/{channelArn}/moderators", + "responseCode":200 + }, + "input":{"shape":"ListChannelModeratorsRequest"}, + "output":{"shape":"ListChannelModeratorsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists all the moderators for a channel.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "ListChannels":{ + "name":"ListChannels", + "http":{ + "method":"GET", + "requestUri":"/channels", + "responseCode":200 + }, + "input":{"shape":"ListChannelsRequest"}, + "output":{"shape":"ListChannelsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists all Channels created under a single Chime App as a paginated list. You can specify filters to narrow results.

Functionality & restrictions

  • Use privacy = PUBLIC to retrieve all public channels in the account.

  • Only an AppInstanceAdmin can set privacy = PRIVATE to list the private channels in an account.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "ListChannelsModeratedByAppInstanceUser":{ + "name":"ListChannelsModeratedByAppInstanceUser", + "http":{ + "method":"GET", + "requestUri":"/channels?scope=app-instance-user-moderated-channels", + "responseCode":200 + }, + "input":{"shape":"ListChannelsModeratedByAppInstanceUserRequest"}, + "output":{"shape":"ListChannelsModeratedByAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

A list of the channels moderated by an AppInstanceUser.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "RedactChannelMessage":{ + "name":"RedactChannelMessage", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/messages/{messageId}?operation=redact", + "responseCode":200 + }, + "input":{"shape":"RedactChannelMessageRequest"}, + "output":{"shape":"RedactChannelMessageResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Redacts message content, but not metadata. The message exists in the back end, but the action returns null content, and the state shows as redacted.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "SendChannelMessage":{ + "name":"SendChannelMessage", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/messages", + "responseCode":201 + }, + "input":{"shape":"SendChannelMessageRequest"}, + "output":{"shape":"SendChannelMessageResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Sends a message to a particular channel that the member is a part of.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

Also, STANDARD messages can contain 4KB of data and 1KB of metadata. CONTROL messages can contain 30 bytes of data and no metadata.

" + }, + "UpdateChannel":{ + "name":"UpdateChannel", + "http":{ + "method":"PUT", + "requestUri":"/channels/{channelArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateChannelRequest"}, + "output":{"shape":"UpdateChannelResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Update a channel's attributes.

Restriction: You can't change a channel's privacy.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "UpdateChannelMessage":{ + "name":"UpdateChannelMessage", + "http":{ + "method":"PUT", + "requestUri":"/channels/{channelArn}/messages/{messageId}", + "responseCode":200 + }, + "input":{"shape":"UpdateChannelMessageRequest"}, + "output":{"shape":"UpdateChannelMessageResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the content of a message.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + }, + "UpdateChannelReadMarker":{ + "name":"UpdateChannelReadMarker", + "http":{ + "method":"PUT", + "requestUri":"/channels/{channelArn}/readMarker", + "responseCode":200 + }, + "input":{"shape":"UpdateChannelReadMarkerRequest"}, + "output":{"shape":"UpdateChannelReadMarkerResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

The details of the time when a user last read messages in a channel.

The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header.

" + } + }, + "shapes":{ + "AppInstanceUserMembershipSummary":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The type of ChannelMembership.

" + }, + "ReadMarkerTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a message was last read.

" + } + }, + "documentation":"

Summary of the membership details of an AppInstanceUser.

" + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The input parameters don't match the service's restrictions.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "BatchChannelMemberships":{ + "type":"structure", + "members":{ + "InvitedBy":{ + "shape":"Identity", + "documentation":"

The identifier of the member who invited another member.

" + }, + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The membership types set for the channel users.

" + }, + "Members":{ + "shape":"Members", + "documentation":"

The users successfully added to the request.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel to which you're adding users.

" + } + }, + "documentation":"

The membership information, including member ARNs, the channel ARN, and membership types.

" + }, + "BatchCreateChannelMembershipError":{ + "type":"structure", + "members":{ + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member that the service couldn't add.

" + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

The error code.

" + }, + "ErrorMessage":{ + "shape":"String", + "documentation":"

The error message.

" + } + }, + "documentation":"

A list of failed member ARNs, error codes, and error messages.

" + }, + "BatchCreateChannelMembershipErrors":{ + "type":"list", + "member":{"shape":"BatchCreateChannelMembershipError"} + }, + "BatchCreateChannelMembershipRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArns", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel to which you're adding users.

", + "location":"uri", + "locationName":"channelArn" + }, + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The membership type of a user, DEFAULT or HIDDEN. Default members are always returned as part of ListChannelMemberships. Hidden members are only returned if the type filter in ListChannelMemberships equals HIDDEN. Otherwise hidden members are not returned. This is only supported by moderators.

" + }, + "MemberArns":{ + "shape":"MemberArns", + "documentation":"

The ARNs of the members you want to add to the channel.

" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "BatchCreateChannelMembershipResponse":{ + "type":"structure", + "members":{ + "BatchChannelMemberships":{ + "shape":"BatchChannelMemberships", + "documentation":"

The list of channel memberships in the response.

" + }, + "Errors":{ + "shape":"BatchCreateChannelMembershipErrors", + "documentation":"

If the action fails for one or more of the memberships in the request, a list of the memberships is returned, along with error codes and error messages.

" + } + } + }, + "Channel":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of a channel.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of a channel.

" + }, + "Mode":{ + "shape":"ChannelMode", + "documentation":"

The mode of the channel.

" + }, + "Privacy":{ + "shape":"ChannelPrivacy", + "documentation":"

The channel's privacy setting.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The channel's metadata.

" + }, + "CreatedBy":{ + "shape":"Identity", + "documentation":"

The AppInstanceUser who created the channel.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the AppInstanceUser created the channel.

" + }, + "LastMessageTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a member sent the last message in the channel.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a channel was last updated.

" + } + }, + "documentation":"

The details of a channel.

" + }, + "ChannelBan":{ + "type":"structure", + "members":{ + "Member":{ + "shape":"Identity", + "documentation":"

The member being banned from the channel.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel from which a member is being banned.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the ban was created.

" + }, + "CreatedBy":{ + "shape":"Identity", + "documentation":"

The AppInstanceUser who created the ban.

" + } + }, + "documentation":"

The details of a channel ban.

" + }, + "ChannelBanSummary":{ + "type":"structure", + "members":{ + "Member":{ + "shape":"Identity", + "documentation":"

The member being banned from a channel.

" + } + }, + "documentation":"

Summary of the details of a ChannelBan.

" + }, + "ChannelBanSummaryList":{ + "type":"list", + "member":{"shape":"ChannelBanSummary"} + }, + "ChannelMembership":{ + "type":"structure", + "members":{ + "InvitedBy":{ + "shape":"Identity", + "documentation":"

The identifier of the member who invited another member.

" + }, + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The membership type set for the channel member.

" + }, + "Member":{ + "shape":"Identity", + "documentation":"

The data of the channel member.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member's channel.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the channel membership was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a channel membership was last updated.

" + } + }, + "documentation":"

The details of a channel member.

" + }, + "ChannelMembershipForAppInstanceUserSummary":{ + "type":"structure", + "members":{ + "ChannelSummary":{ + "shape":"ChannelSummary", + "documentation":"

Returns the channel data for an AppInstance.

" + }, + "AppInstanceUserMembershipSummary":{ + "shape":"AppInstanceUserMembershipSummary", + "documentation":"

Returns the channel membership data for an AppInstance.

" + } + }, + "documentation":"

Summary of the channel membership details of an AppInstanceUser.

" + }, + "ChannelMembershipForAppInstanceUserSummaryList":{ + "type":"list", + "member":{"shape":"ChannelMembershipForAppInstanceUserSummary"} + }, + "ChannelMembershipSummary":{ + "type":"structure", + "members":{ + "Member":{ + "shape":"Identity", + "documentation":"

A member's summary data.

" + } + }, + "documentation":"

Summary of the details of a ChannelMembership.

" + }, + "ChannelMembershipSummaryList":{ + "type":"list", + "member":{"shape":"ChannelMembershipSummary"} + }, + "ChannelMembershipType":{ + "type":"string", + "enum":[ + "DEFAULT", + "HIDDEN" + ] + }, + "ChannelMessage":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of a message.

" + }, + "Content":{ + "shape":"Content", + "documentation":"

The message content.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The message metadata.

" + }, + "Type":{ + "shape":"ChannelMessageType", + "documentation":"

The message type.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the message was created.

" + }, + "LastEditedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a message was edited.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a message was updated.

" + }, + "Sender":{ + "shape":"Identity", + "documentation":"

The message sender.

" + }, + "Redacted":{ + "shape":"NonNullableBoolean", + "documentation":"

Hides the content of a message.

" + }, + "Persistence":{ + "shape":"ChannelMessagePersistenceType", + "documentation":"

The persistence setting for a channel message.

" + } + }, + "documentation":"

The details of a message in a channel.

" + }, + "ChannelMessagePersistenceType":{ + "type":"string", + "enum":[ + "PERSISTENT", + "NON_PERSISTENT" + ] + }, + "ChannelMessageSummary":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message.

" + }, + "Content":{ + "shape":"Content", + "documentation":"

The content of the message.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the message.

" + }, + "Type":{ + "shape":"ChannelMessageType", + "documentation":"

The type of message.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the message summary was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a message was last updated.

" + }, + "LastEditedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a message was last edited.

" + }, + "Sender":{ + "shape":"Identity", + "documentation":"

The message sender.

" + }, + "Redacted":{ + "shape":"NonNullableBoolean", + "documentation":"

Indicates whether a message was redacted.

" + } + }, + "documentation":"

Summary of the messages in a Channel.

" + }, + "ChannelMessageSummaryList":{ + "type":"list", + "member":{"shape":"ChannelMessageSummary"} + }, + "ChannelMessageType":{ + "type":"string", + "enum":[ + "STANDARD", + "CONTROL" + ] + }, + "ChannelMode":{ + "type":"string", + "enum":[ + "UNRESTRICTED", + "RESTRICTED" + ] + }, + "ChannelModeratedByAppInstanceUserSummary":{ + "type":"structure", + "members":{ + "ChannelSummary":{ + "shape":"ChannelSummary", + "documentation":"

Summary of the details of a Channel.

" + } + }, + "documentation":"

Summary of the details of a moderated channel.

" + }, + "ChannelModeratedByAppInstanceUserSummaryList":{ + "type":"list", + "member":{"shape":"ChannelModeratedByAppInstanceUserSummary"} + }, + "ChannelModerator":{ + "type":"structure", + "members":{ + "Moderator":{ + "shape":"Identity", + "documentation":"

The moderator's data.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the moderator's channel.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the moderator was created.

" + }, + "CreatedBy":{ + "shape":"Identity", + "documentation":"

The AppInstanceUser who created the moderator.

" + } + }, + "documentation":"

The details of a channel moderator.

" + }, + "ChannelModeratorSummary":{ + "type":"structure", + "members":{ + "Moderator":{ + "shape":"Identity", + "documentation":"

The data for a moderator.

" + } + }, + "documentation":"

Summary of the details of a ChannelModerator.

" + }, + "ChannelModeratorSummaryList":{ + "type":"list", + "member":{"shape":"ChannelModeratorSummary"} + }, + "ChannelPrivacy":{ + "type":"string", + "enum":[ + "PUBLIC", + "PRIVATE" + ] + }, + "ChannelSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the channel.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "Mode":{ + "shape":"ChannelMode", + "documentation":"

The mode of the channel.

" + }, + "Privacy":{ + "shape":"ChannelPrivacy", + "documentation":"

The privacy setting of the channel.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the channel.

" + }, + "LastMessageTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the last message in a channel was sent.

" + } + }, + "documentation":"

Summary of the details of a Channel.

" + }, + "ChannelSummaryList":{ + "type":"list", + "member":{"shape":"ChannelSummary"} + }, + "ChimeArn":{ + "type":"string", + "max":1600, + "min":5, + "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":2, + "pattern":"[-_a-zA-Z0-9]*", + "sensitive":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The request could not be processed because of conflict in the current state of the resource.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "Content":{ + "type":"string", + "max":4096, + "min":0, + "pattern":"[\\s\\S]*", + "sensitive":true + }, + "CreateChannelBanRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the ban request.

", + "location":"uri", + "locationName":"channelArn" + }, + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member being banned.

" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "CreateChannelBanResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the response to the ban request.

" + }, + "Member":{ + "shape":"Identity", + "documentation":"

The ChannelArn and BannedIdentity of the member in the ban response.

" + } + } + }, + "CreateChannelMembershipRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn", + "Type", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel to which you're adding users.

", + "location":"uri", + "locationName":"channelArn" + }, + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member you want to add to the channel.

" + }, + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The membership type of a user, DEFAULT or HIDDEN. Default members are always returned as part of ListChannelMemberships. Hidden members are only returned if the type filter in ListChannelMemberships equals HIDDEN. Otherwise hidden members are not returned. This is only supported by moderators.

" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "CreateChannelMembershipResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "Member":{ + "shape":"Identity", + "documentation":"

The ARN and metadata of the member being added.

" + } + } + }, + "CreateChannelModeratorRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChannelModeratorArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "ChannelModeratorArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the moderator.

" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "CreateChannelModeratorResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "ChannelModerator":{ + "shape":"Identity", + "documentation":"

The ARNs of the channel and the moderator.

" + } + } + }, + "CreateChannelRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "Name", + "ClientRequestToken", + "ChimeBearer" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel request.

" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the channel.

" + }, + "Mode":{ + "shape":"ChannelMode", + "documentation":"

The channel mode: UNRESTRICTED or RESTRICTED. Administrators, moderators, and channel members can add themselves and other members to unrestricted channels. Only administrators and moderators can add members to restricted channels.

" + }, + "Privacy":{ + "shape":"ChannelPrivacy", + "documentation":"

The channel's privacy level: PUBLIC or PRIVATE. Private channels aren't discoverable by users outside the channel. Public channels are discoverable by anyone in the AppInstance.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the creation request. Limited to 1KB and UTF-8.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The client token for the request. An Idempotency token.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the creation request.

" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "CreateChannelResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + } + } + }, + "DeleteChannelBanRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel from which the AppInstanceUser was banned.

", + "location":"uri", + "locationName":"channelArn" + }, + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceUser that you want to reinstate.

", + "location":"uri", + "locationName":"memberArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DeleteChannelMembershipRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel from which you want to remove the user.

", + "location":"uri", + "locationName":"channelArn" + }, + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member that you're removing from the channel.

", + "location":"uri", + "locationName":"memberArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DeleteChannelMessageRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MessageId", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message being deleted.

", + "location":"uri", + "locationName":"messageId" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DeleteChannelModeratorRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChannelModeratorArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "ChannelModeratorArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the moderator being deleted.

", + "location":"uri", + "locationName":"channelModeratorArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DeleteChannelRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel being deleted.

", + "location":"uri", + "locationName":"channelArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DescribeChannelBanRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel from which the user is banned.

", + "location":"uri", + "locationName":"channelArn" + }, + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member being banned.

", + "location":"uri", + "locationName":"memberArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DescribeChannelBanResponse":{ + "type":"structure", + "members":{ + "ChannelBan":{ + "shape":"ChannelBan", + "documentation":"

The details of the ban.

" + } + } + }, + "DescribeChannelMembershipForAppInstanceUserRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "AppInstanceUserArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel to which the user belongs.

", + "location":"uri", + "locationName":"channelArn" + }, + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the user in a channel.

", + "location":"querystring", + "locationName":"app-instance-user-arn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DescribeChannelMembershipForAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "ChannelMembership":{ + "shape":"ChannelMembershipForAppInstanceUserSummary", + "documentation":"

The channel to which a user belongs.

" + } + } + }, + "DescribeChannelMembershipRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member.

", + "location":"uri", + "locationName":"memberArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DescribeChannelMembershipResponse":{ + "type":"structure", + "members":{ + "ChannelMembership":{ + "shape":"ChannelMembership", + "documentation":"

The details of the membership.

" + } + } + }, + "DescribeChannelModeratedByAppInstanceUserRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "AppInstanceUserArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the moderated channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceUser in the moderated channel.

", + "location":"querystring", + "locationName":"app-instance-user-arn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DescribeChannelModeratedByAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "Channel":{ + "shape":"ChannelModeratedByAppInstanceUserSummary", + "documentation":"

The moderated channel.

" + } + } + }, + "DescribeChannelModeratorRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChannelModeratorArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "ChannelModeratorArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel moderator.

", + "location":"uri", + "locationName":"channelModeratorArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DescribeChannelModeratorResponse":{ + "type":"structure", + "members":{ + "ChannelModerator":{ + "shape":"ChannelModerator", + "documentation":"

The details of the channel moderator.

" + } + } + }, + "DescribeChannelRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "DescribeChannelResponse":{ + "type":"structure", + "members":{ + "Channel":{ + "shape":"Channel", + "documentation":"

The channel details.

" + } + } + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "BadRequest", + "Conflict", + "Forbidden", + "NotFound", + "PreconditionFailed", + "ResourceLimitExceeded", + "ServiceFailure", + "AccessDenied", + "ServiceUnavailable", + "Throttled", + "Throttling", + "Unauthorized", + "Unprocessable", + "VoiceConnectorGroupAssociationsExist", + "PhoneNumberAssociationsExist" + ] + }, + "ForbiddenException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The client is permanently forbidden from making the request.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "GetChannelMessageRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MessageId", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message.

", + "location":"uri", + "locationName":"messageId" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "GetChannelMessageResponse":{ + "type":"structure", + "members":{ + "ChannelMessage":{ + "shape":"ChannelMessage", + "documentation":"

The details of and content in the message.

" + } + } + }, + "GetMessagingSessionEndpointRequest":{ + "type":"structure", + "members":{ + } + }, + "GetMessagingSessionEndpointResponse":{ + "type":"structure", + "members":{ + "Endpoint":{ + "shape":"MessagingSessionEndpoint", + "documentation":"

The endpoint returned in the response.

" + } + } + }, + "Identity":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ChimeArn", + "documentation":"

The ARN in an Identity.

" + }, + "Name":{ + "shape":"ResourceName", + "documentation":"

The name in an Identity.

" + } + }, + "documentation":"

The details of a user.

" + }, + "ListChannelBansRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of bans that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested bans are returned.

", + "location":"querystring", + "locationName":"next-token" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "ListChannelBansResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested bans are returned.

" + }, + "ChannelBans":{ + "shape":"ChannelBanSummaryList", + "documentation":"

The information for each requested ban.

" + } + } + }, + "ListChannelMembershipsForAppInstanceUserRequest":{ + "type":"structure", + "required":["ChimeBearer"], + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstanceUser.

", + "location":"querystring", + "locationName":"app-instance-user-arn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of users that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of channel memberships is reached.

", + "location":"querystring", + "locationName":"next-token" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "ListChannelMembershipsForAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "ChannelMemberships":{ + "shape":"ChannelMembershipForAppInstanceUserSummaryList", + "documentation":"

The information for the requested channel memberships.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested users are returned.

" + } + } + }, + "ListChannelMembershipsRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The membership type of a user, DEFAULT or HIDDEN. Default members are always returned as part of ListChannelMemberships. Hidden members are only returned if the type filter in ListChannelMemberships equals HIDDEN. Otherwise hidden members are not returned.

", + "location":"querystring", + "locationName":"type" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of channel memberships that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested channel memberships are returned.

", + "location":"querystring", + "locationName":"next-token" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "ListChannelMembershipsResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "ChannelMemberships":{ + "shape":"ChannelMembershipSummaryList", + "documentation":"

The information for the requested channel memberships.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested channel memberships are returned.

" + } + } + }, + "ListChannelMessagesRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The order in which you want messages sorted. Default is Descending, based on time created.

", + "location":"querystring", + "locationName":"sort-order" + }, + "NotBefore":{ + "shape":"Timestamp", + "documentation":"

The initial or starting time stamp for your requested messages.

", + "location":"querystring", + "locationName":"not-before" + }, + "NotAfter":{ + "shape":"Timestamp", + "documentation":"

The final or ending time stamp for your requested messages.

", + "location":"querystring", + "locationName":"not-after" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of messages that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested messages are returned.

", + "location":"querystring", + "locationName":"next-token" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "ListChannelMessagesResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel containing the requested messages.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested messages are returned.

" + }, + "ChannelMessages":{ + "shape":"ChannelMessageSummaryList", + "documentation":"

The information about, and content of, each requested message.

" + } + } + }, + "ListChannelModeratorsRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of moderators that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested moderators are returned.

", + "location":"querystring", + "locationName":"next-token" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "ListChannelModeratorsResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested moderators are returned.

" + }, + "ChannelModerators":{ + "shape":"ChannelModeratorSummaryList", + "documentation":"

The information about and names of each moderator.

" + } + } + }, + "ListChannelsModeratedByAppInstanceUserRequest":{ + "type":"structure", + "required":["ChimeBearer"], + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the user in the moderated channel.

", + "location":"querystring", + "locationName":"app-instance-user-arn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of channels in the request.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of channels moderated by the user is reached.

", + "location":"querystring", + "locationName":"next-token" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "ListChannelsModeratedByAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "Channels":{ + "shape":"ChannelModeratedByAppInstanceUserSummaryList", + "documentation":"

The moderated channels in the request.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of channels moderated by the user is reached.

" + } + } + }, + "ListChannelsRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "ChimeBearer" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the AppInstance.

", + "location":"querystring", + "locationName":"app-instance-arn" + }, + "Privacy":{ + "shape":"ChannelPrivacy", + "documentation":"

The privacy setting. PUBLIC retrieves all the public channels. PRIVATE retrieves private channels. Only an AppInstanceAdmin can retrieve private channels.

", + "location":"querystring", + "locationName":"privacy" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of channels that you want to return.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested channels are returned.

", + "location":"querystring", + "locationName":"next-token" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "ListChannelsResponse":{ + "type":"structure", + "members":{ + "Channels":{ + "shape":"ChannelSummaryList", + "documentation":"

The information about each channel.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of channels is reached.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MemberArns":{ + "type":"list", + "member":{"shape":"ChimeArn"}, + "max":100, + "min":1 + }, + "Members":{ + "type":"list", + "member":{"shape":"Identity"} + }, + "MessageId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[-_a-zA-Z0-9]*" + }, + "MessagingSessionEndpoint":{ + "type":"structure", + "members":{ + "Url":{ + "shape":"UrlType", + "documentation":"

The endpoint to which you establish a websocket connection.

" + } + }, + "documentation":"

The websocket endpoint used to connect to Amazon Chime SDK messaging.

" + }, + "Metadata":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*", + "sensitive":true + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":0, + "pattern":".*", + "sensitive":true + }, + "NonEmptyContent":{ + "type":"string", + "min":1, + "pattern":"[\\s\\S]*", + "sensitive":true + }, + "NonEmptyResourceName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*", + "sensitive":true + }, + "NonNullableBoolean":{"type":"boolean"}, + "NotFoundException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

One or more of the resources in the request do not exist in the system.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RedactChannelMessageRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MessageId", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel containing the messages that you want to redact.

", + "location":"uri", + "locationName":"channelArn" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message being redacted.

", + "location":"uri", + "locationName":"messageId" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "RedactChannelMessageResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel containing the messages that you want to redact.

" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message being redacted.

" + } + } + }, + "ResourceLimitExceededException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The request exceeds the resource limit.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ResourceName":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*", + "sensitive":true + }, + "SendChannelMessageRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "Content", + "Type", + "Persistence", + "ClientRequestToken", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "Content":{ + "shape":"NonEmptyContent", + "documentation":"

The content of the message.

" + }, + "Type":{ + "shape":"ChannelMessageType", + "documentation":"

The type of message, STANDARD or CONTROL.

" + }, + "Persistence":{ + "shape":"ChannelMessagePersistenceType", + "documentation":"

Boolean that controls whether the message is persisted on the back end. Required.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The optional metadata for each message.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The Idempotency token for each client request.

", + "idempotencyToken":true + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "SendChannelMessageResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID string assigned to each message.

" + } + } + }, + "ServiceFailureException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The service encountered an unexpected error.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The service is currently unavailable.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the tag.

" + } + }, + "documentation":"

Describes a tag applied to a resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "sensitive":true + }, + "ThrottledClientException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The client exceeded its request rate limit.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UnauthorizedClientException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The client is not currently authorized to make the request.

", + "error":{"httpStatusCode":401}, + "exception":true + }, + "UpdateChannelMessageRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MessageId", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID string of the message being updated.

", + "location":"uri", + "locationName":"messageId" + }, + "Content":{ + "shape":"Content", + "documentation":"

The content of the message being updated.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the message being updated.

" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "UpdateChannelMessageResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID string of the message being updated.

" + } + } + }, + "UpdateChannelReadMarkerRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "UpdateChannelReadMarkerResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + } + } + }, + "UpdateChannelRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "Name", + "Mode", + "ChimeBearer" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the channel.

" + }, + "Mode":{ + "shape":"ChannelMode", + "documentation":"

The mode of the update request.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata for the update request.

" + }, + "ChimeBearer":{ + "shape":"ChimeArn", + "documentation":"

The AppInstanceUserArn of the user that makes the API call.

", + "location":"header", + "locationName":"x-amz-chime-bearer" + } + } + }, + "UpdateChannelResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + } + } + }, + "UrlType":{ + "type":"string", + "max":4096 + } + }, + "documentation":"

The Amazon Chime SDK Messaging APIs in this section allow software developers to send and receive messages in custom messaging applications. These APIs depend on the frameworks provided by the Amazon Chime SDK Identity APIs. For more information about the messaging APIs, see .

" +} diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index c3af06710a2e..24fb8a808c99 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 cloud9 diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index b36d1959d8fe..265d55cf6ded 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 5bc39b827885..02677ee9bdbc 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 93b3cebc0d96..75e3dda0e280 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -72,7 +72,7 @@ "errors":[ {"shape":"TokenAlreadyExistsException"} ], - "documentation":"

For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again.

A stack goes into the UPDATE_ROLLBACK_FAILED state when AWS CloudFormation cannot roll back all changes after a failed stack update. For example, you might have a stack that is rolling back to an old database instance that was deleted outside of AWS CloudFormation. Because AWS CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail.

" + "documentation":"

For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again.

A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation cannot roll back all changes after a failed stack update. For example, you might have a stack that is rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail.

" }, "CreateChangeSet":{ "name":"CreateChangeSet", @@ -90,7 +90,7 @@ {"shape":"InsufficientCapabilitiesException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that AWS CloudFormation will create. If you create a change set for an existing stack, AWS CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources AWS CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack.

To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE. To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. To create a change set for an import operation, specify IMPORT for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, AWS CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action.

When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. AWS CloudFormation doesn't make changes until you execute the change set.

To create a change set for the entire stack hierachy, set IncludeNestedStacks to True.

" + "documentation":"

Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that CloudFormation will create. If you create a change set for an existing stack, CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack.

To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE. To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. To create a change set for an import operation, specify IMPORT for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action.

When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. CloudFormation doesn't make changes until you execute the change set.

To create a change set for the entire stack hierarchy, set IncludeNestedStacks to True.

" }, "CreateStack":{ "name":"CreateStack", @@ -182,7 +182,7 @@ "errors":[ {"shape":"InvalidChangeSetStatusException"} ], - "documentation":"

Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.

If the call successfully completes, AWS CloudFormation successfully deleted the change set.

If IncludeNestedStacks specifies True during the creation of the nested change set, then DeleteChangeSet will delete all change sets that belong to the stacks hierarchy and will also delete all change sets for nested stacks with the status of REVIEW_IN_PROGRESS.

" + "documentation":"

Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.

If the call successfully completes, CloudFormation successfully deleted the change set.

If IncludeNestedStacks specifies True during the creation of the nested change set, then DeleteChangeSet will delete all change sets that belong to the stacks hierarchy and will also delete all change sets for nested stacks with the status of REVIEW_IN_PROGRESS.

" }, "DeleteStack":{ "name":"DeleteStack", @@ -262,7 +262,7 @@ "shape":"DescribeAccountLimitsOutput", "resultWrapper":"DescribeAccountLimitsResult" }, - "documentation":"

Retrieves your account's AWS CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see AWS CloudFormation Limits in the AWS CloudFormation User Guide.

" + "documentation":"

Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see CloudFormation Limits in the CloudFormation User Guide.

" }, "DescribeChangeSet":{ "name":"DescribeChangeSet", @@ -278,7 +278,7 @@ "errors":[ {"shape":"ChangeSetNotFoundException"} ], - "documentation":"

Returns the inputs for the change set and a list of changes that AWS CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the AWS CloudFormation User Guide.

" + "documentation":"

Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the CloudFormation User Guide.

" }, "DescribePublisher":{ "name":"DescribePublisher", @@ -321,7 +321,7 @@ "shape":"DescribeStackEventsOutput", "resultWrapper":"DescribeStackEventsResult" }, - "documentation":"

Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, go to Stacks in the AWS CloudFormation User Guide.

You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID).

" + "documentation":"

Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, go to Stacks in the CloudFormation User Guide.

You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID).

" }, "DescribeStackInstance":{ "name":"DescribeStackInstance", @@ -338,7 +338,7 @@ {"shape":"StackSetNotFoundException"}, {"shape":"StackInstanceNotFoundException"} ], - "documentation":"

Returns the stack instance that's associated with the specified stack set, AWS account, and Region.

For a list of stack instances that are associated with a specific stack set, use ListStackInstances.

" + "documentation":"

Returns the stack instance that's associated with the specified stack set, account, and Region.

For a list of stack instances that are associated with a specific stack set, use ListStackInstances.

" }, "DescribeStackResource":{ "name":"DescribeStackResource", @@ -364,7 +364,7 @@ "shape":"DescribeStackResourceDriftsOutput", "resultWrapper":"DescribeStackResourceDriftsResult" }, - "documentation":"

Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where AWS CloudFormation detects configuration drift.

For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that have not yet been checked for drift are not included. Resources that do not currently support drift detection are not checked, and so not included. For a list of resources that support drift detection, see Resources that Support Drift Detection.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack.

" + "documentation":"

Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects configuration drift.

For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that have not yet been checked for drift are not included. Resources that do not currently support drift detection are not checked, and so not included. For a list of resources that support drift detection, see Resources that Support Drift Detection.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack.

" }, "DescribeStackResources":{ "name":"DescribeStackResources", @@ -377,7 +377,7 @@ "shape":"DescribeStackResourcesOutput", "resultWrapper":"DescribeStackResourcesResult" }, - "documentation":"

Returns AWS resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned.

Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead.

For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted.

You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the AWS CloudFormation User Guide.

A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request.

" + "documentation":"

Returns Amazon Web Services resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned.

Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead.

For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted.

You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the CloudFormation User Guide.

A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request.

" }, "DescribeStackSet":{ "name":"DescribeStackSet", @@ -471,7 +471,7 @@ "shape":"DetectStackDriftOutput", "resultWrapper":"DetectStackDriftResult" }, - "documentation":"

Detects whether a stack's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, AWS CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources.

For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection.

DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

When detecting drift on a stack, AWS CloudFormation does not detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself.

" + "documentation":"

Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources.

For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection.

DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

When detecting drift on a stack, CloudFormation does not detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself.

" }, "DetectStackResourceDrift":{ "name":"DetectStackResourceDrift", @@ -484,7 +484,7 @@ "shape":"DetectStackResourceDriftOutput", "resultWrapper":"DetectStackResourceDriftResult" }, - "documentation":"

Returns information about whether a resource's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which AWS CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

Resources that do not currently support drift detection cannot be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

" + "documentation":"

Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

Resources that do not currently support drift detection cannot be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

" }, "DetectStackSetDrift":{ "name":"DetectStackSetDrift", @@ -515,7 +515,7 @@ "shape":"EstimateTemplateCostOutput", "resultWrapper":"EstimateTemplateCostResult" }, - "documentation":"

Returns the estimated monthly cost of a template. The return value is an AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

" + "documentation":"

Returns the estimated monthly cost of a template. The return value is an Amazon Web Services Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

" }, "ExecuteChangeSet":{ "name":"ExecuteChangeSet", @@ -534,7 +534,7 @@ {"shape":"InsufficientCapabilitiesException"}, {"shape":"TokenAlreadyExistsException"} ], - "documentation":"

Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, AWS CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.

When you execute a change set, AWS CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.

If a stack policy is associated with the stack, AWS CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.

To create a change set for the entire stack hierachy, IncludeNestedStacks must have been set to True.

" + "documentation":"

Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.

When you execute a change set, CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.

If a stack policy is associated with the stack, CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.

To create a change set for the entire stack hierarchy, IncludeNestedStacks must have been set to True.

" }, "GetStackPolicy":{ "name":"GetStackPolicy", @@ -581,6 +581,28 @@ ], "documentation":"

Returns information about a new or existing template. The GetTemplateSummary action is useful for viewing parameter information, such as default parameter values and parameter types, before you create or update a stack or stack set.

You can use the GetTemplateSummary action when you submit a template, or you can get template information for a stack set, or a running or deleted stack.

For deleted stacks, GetTemplateSummary returns the template information for up to 90 days after the stack has been deleted. If the template does not exist, a ValidationError is returned.

" }, + "ImportStacksToStackSet":{ + "name":"ImportStacksToStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportStacksToStackSetInput"}, + "output":{ + "shape":"ImportStacksToStackSetOutput", + "resultWrapper":"ImportStacksToStackSetResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"StackSetNotFoundException"}, + {"shape":"InvalidOperationException"}, + {"shape":"OperationInProgressException"}, + {"shape":"OperationIdAlreadyExistsException"}, + {"shape":"StackNotFoundException"}, + {"shape":"StaleRequestException"} + ], + "documentation":"

Import existing stacks into a new stack set. Use the stack import operation to import up to 10 stacks into a new stack set in the same account as the source stack or in a different administrator account and Region, by specifying the stack ID of the stack you intend to import.

ImportStacksToStackSet is only supported by self-managed permissions.

" + }, "ListChangeSets":{ "name":"ListChangeSets", "http":{ @@ -592,7 +614,7 @@ "shape":"ListChangeSetsOutput", "resultWrapper":"ListChangeSetsResult" }, - "documentation":"

Returns the ID and status of each active change set for a stack. For example, AWS CloudFormation lists change sets that are in the CREATE_IN_PROGRESS or CREATE_PENDING state.

" + "documentation":"

Returns the ID and status of each active change set for a stack. For example, CloudFormation lists change sets that are in the CREATE_IN_PROGRESS or CREATE_PENDING state.

" }, "ListExports":{ "name":"ListExports", @@ -605,7 +627,7 @@ "shape":"ListExportsOutput", "resultWrapper":"ListExportsResult" }, - "documentation":"

Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function.

For more information, see AWS CloudFormation Export Stack Output Values.

" + "documentation":"

Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function.

For more information, see CloudFormation Export Stack Output Values.

" }, "ListImports":{ "name":"ListImports", @@ -634,7 +656,7 @@ "errors":[ {"shape":"StackSetNotFoundException"} ], - "documentation":"

Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific AWS account name or Region, or that have a specific status.

" + "documentation":"

Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific account name or Region, or that have a specific status.

" }, "ListStackResources":{ "name":"ListStackResources", @@ -693,7 +715,7 @@ "shape":"ListStackSetsOutput", "resultWrapper":"ListStackSetsResult" }, - "documentation":"

Returns summary information about stack sets that are associated with the user.

  • [Self-managed permissions] If you set the CallAs parameter to SELF while signed in to your AWS account, ListStackSets returns all self-managed stack sets in your AWS account.

  • [Service-managed permissions] If you set the CallAs parameter to SELF while signed in to the organization's management account, ListStackSets returns all stack sets in the management account.

  • [Service-managed permissions] If you set the CallAs parameter to DELEGATED_ADMIN while signed in to your member account, ListStackSets returns all stack sets with service-managed permissions in the management account.

" + "documentation":"

Returns summary information about stack sets that are associated with the user.

  • [Self-managed permissions] If you set the CallAs parameter to SELF while signed in to your account, ListStackSets returns all self-managed stack sets in your account.

  • [Service-managed permissions] If you set the CallAs parameter to SELF while signed in to the organization's management account, ListStackSets returns all stack sets in the management account.

  • [Service-managed permissions] If you set the CallAs parameter to DELEGATED_ADMIN while signed in to your member account, ListStackSets returns all stack sets with service-managed permissions in the management account.

" }, "ListStacks":{ "name":"ListStacks", @@ -809,7 +831,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all AWS regions.

For information on requirements for registering as a public extension publisher, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide.

", + "documentation":"

Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all Regions.

For information on requirements for registering as a public extension publisher, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide.

", "idempotent":true }, "RegisterType":{ @@ -826,7 +848,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your AWS account, and includes:

  • Validating the extension schema

  • Determining which handlers, if any, have been specified for the extension

  • Making the extension available for use in your account

For more information on how to develop extensions and ready them for registeration, see Creating Resource Providers in the CloudFormation CLI User Guide.

You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per region. Use DeregisterType to deregister specific extension versions if necessary.

Once you have initiated a registration request using RegisterType , you can use DescribeTypeRegistration to monitor the progress of the registration request.

Once you have registered a private extension in your account and region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

", + "documentation":"

Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your account, and includes:

  • Validating the extension schema

  • Determining which handlers, if any, have been specified for the extension

  • Making the extension available for use in your account

For more information on how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.

You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per region. Use DeregisterType to deregister specific extension versions if necessary.

Once you have initiated a registration request using RegisterType , you can use DescribeTypeRegistration to monitor the progress of the registration request.

Once you have registered a private extension in your account and region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

", "idempotent":true }, "SetStackPolicy":{ @@ -853,7 +875,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

Specifies the configuration data for a registered CloudFormation extension, in the given account and region.

To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

It is strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the AWS CloudFormation User Guide.

" + "documentation":"

Specifies the configuration data for a registered CloudFormation extension, in the given account and region.

To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

It is strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide.

" }, "SetTypeDefaultVersion":{ "name":"SetTypeDefaultVersion", @@ -880,7 +902,7 @@ "requestUri":"/" }, "input":{"shape":"SignalResourceInput"}, - "documentation":"

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

" + "documentation":"

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

" }, "StopStackSetOperation":{ "name":"StopStackSetOperation", @@ -915,7 +937,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.

  • For resource types, this includes passing all contracts tests defined for the type.

  • For modules, this includes determining if the module's model meets all necessary requirements.

For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.

If you do not specify a version, CloudFormation uses the default version of the extension in your account and region for testing.

To perform testing, CloudFormation assumes the execution role specified when the test was registered. For more information, see RegisterType.

Once you've initiated testing on an extension using TestType, you can use DescribeType to monitor the current test status and test status description for the extension.

An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

", + "documentation":"

Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.

  • For resource types, this includes passing all contracts tests defined for the type.

  • For modules, this includes determining if the module's model meets all necessary requirements.

For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.

If you do not specify a version, CloudFormation uses the default version of the extension in your account and region for testing.

To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType.

Once you've initiated testing on an extension using TestType, you can use DescribeType to monitor the current test status and test status description for the extension.

An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

", "idempotent":true }, "UpdateStack":{ @@ -988,7 +1010,7 @@ "shape":"UpdateTerminationProtectionOutput", "resultWrapper":"UpdateTerminationProtectionResult" }, - "documentation":"

Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the AWS CloudFormation User Guide.

For nested stacks, termination protection is set on the root stack and cannot be changed directly on the nested stack.

" + "documentation":"

Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide.

For nested stacks, termination protection is set on the root stack and cannot be changed directly on the nested stack.

" }, "ValidateTemplate":{ "name":"ValidateTemplate", @@ -1001,7 +1023,7 @@ "shape":"ValidateTemplateOutput", "resultWrapper":"ValidateTemplateResult" }, - "documentation":"

Validates a specified template. AWS CloudFormation first checks if the template is valid JSON. If it isn't, AWS CloudFormation checks if the template is valid YAML. If both these checks fail, AWS CloudFormation returns a template validation error.

" + "documentation":"

Validates a specified template. CloudFormation first checks if the template is valid JSON. If it isn't, CloudFormation checks if the template is valid YAML. If both these checks fail, CloudFormation returns a template validation error.

" } }, "shapes":{ @@ -1015,14 +1037,14 @@ "members":{ "Status":{ "shape":"AccountGateStatus", - "documentation":"

The status of the account gate function.

  • SUCCEEDED: The account gate function has determined that the account and Region passes any requirements for a stack set operation to occur. AWS CloudFormation proceeds with the stack operation in that account and Region.

  • FAILED: The account gate function has determined that the account and Region does not meet the requirements for a stack set operation to occur. AWS CloudFormation cancels the stack set operation in that account and Region, and sets the stack set operation result status for that account and Region to FAILED.

  • SKIPPED: AWS CloudFormation has skipped calling the account gate function for this account and Region, for one of the following reasons:

    • An account gate function has not been specified for the account and Region. AWS CloudFormation proceeds with the stack set operation in this account and Region.

    • The AWSCloudFormationStackSetExecutionRole of the stack set adminstration account lacks permissions to invoke the function. AWS CloudFormation proceeds with the stack set operation in this account and Region.

    • Either no action is necessary, or no action is possible, on the stack. AWS CloudFormation skips the stack set operation in this account and Region.

" + "documentation":"

The status of the account gate function.

  • SUCCEEDED: The account gate function has determined that the account and Region passes any requirements for a stack set operation to occur. CloudFormation proceeds with the stack operation in that account and Region.

  • FAILED: The account gate function has determined that the account and Region does not meet the requirements for a stack set operation to occur. CloudFormation cancels the stack set operation in that account and Region, and sets the stack set operation result status for that account and Region to FAILED.

  • SKIPPED: CloudFormation has skipped calling the account gate function for this account and Region, for one of the following reasons:

    • An account gate function has not been specified for the account and Region. CloudFormation proceeds with the stack set operation in this account and Region.

    • The AWSCloudFormationStackSetExecutionRole of the stack set administration account lacks permissions to invoke the function. CloudFormation proceeds with the stack set operation in this account and Region.

    • Either no action is necessary, or no action is possible, on the stack. CloudFormation skips the stack set operation in this account and Region.

" }, "StatusReason":{ "shape":"AccountGateStatusReason", "documentation":"

The reason for the account gate status assigned to this account and Region for the stack set operation.

" } }, - "documentation":"

Structure that contains the results of the account gate function which AWS CloudFormation invokes, if present, before proceeding with a stack set operation in an account and Region.

For each account and Region, AWS CloudFormation lets you specify a Lamdba function that encapsulates any requirements that must be met before CloudFormation can proceed with a stack set operation in that account and Region. CloudFormation invokes the function each time a stack set operation is requested for that account and Region; if the function returns FAILED, CloudFormation cancels the operation in that account and Region, and sets the stack set operation result status for that account and Region to FAILED.

For more information, see Configuring a target account gate.

" + "documentation":"

Structure that contains the results of the account gate function which CloudFormation invokes, if present, before proceeding with a stack set operation in an account and Region.

For each account and Region, CloudFormation lets you specify a Lambda function that encapsulates any requirements that must be met before CloudFormation can proceed with a stack set operation in that account and Region. CloudFormation invokes the function each time a stack set operation is requested for that account and Region; if the function returns FAILED, CloudFormation cancels the operation in that account and Region, and sets the stack set operation result status for that account and Region to FAILED.

For more information, see Configuring a target account gate.

" }, "AccountGateStatus":{ "type":"string", @@ -1045,7 +1067,7 @@ "documentation":"

The value that is associated with the account limit name.

" } }, - "documentation":"

The AccountLimit data type.

CloudFormation has the following limits per account:

  • Number of concurrent resources

  • Number of stacks

  • Number of stack outputs

For more information about these account limits, and other CloudFormation limits, see AWS CloudFormation Limits in the AWS CloudFormation User Guide.

" + "documentation":"

The AccountLimit data type.

CloudFormation has the following limits per account:

  • Number of concurrent resources

  • Number of stacks

  • Number of stack outputs

For more information about these account limits, and other CloudFormation limits, see CloudFormation Limits in the CloudFormation User Guide.

" }, "AccountLimitList":{ "type":"list", @@ -1135,14 +1157,14 @@ "members":{ "Enabled":{ "shape":"AutoDeploymentNullable", - "documentation":"

If set to true, StackSets automatically deploys additional stack instances to AWS Organizations accounts that are added to a target organization or organizational unit (OU) in the specified Regions. If an account is removed from a target organization or OU, StackSets deletes stack instances from the account in the specified Regions.

" + "documentation":"

If set to true, StackSets automatically deploys additional stack instances to Organizations accounts that are added to a target organization or organizational unit (OU) in the specified Regions. If an account is removed from a target organization or OU, StackSets deletes stack instances from the account in the specified Regions.

" }, "RetainStacksOnAccountRemoval":{ "shape":"RetainStacksOnAccountRemovalNullable", "documentation":"

If set to true, stack resources are retained when an account is removed from a target organization or OU. If set to false, stack resources are deleted. Specify only if Enabled is set to True.

" } }, - "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

" + "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organization or organizational unit (OU).

" }, "AutoDeploymentNullable":{"type":"boolean"}, "AutoUpdate":{"type":"boolean"}, @@ -1232,7 +1254,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this CancelUpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to cancel an update on a stack with the same name. You might retry CancelUpdateStack requests to ensure that AWS CloudFormation successfully received them.

" + "documentation":"

A unique identifier for this CancelUpdateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to cancel an update on a stack with the same name. You might retry CancelUpdateStack requests to ensure that CloudFormation successfully received them.

" } }, "documentation":"

The input for the CancelUpdateStack action.

" @@ -1265,14 +1287,14 @@ "members":{ "Type":{ "shape":"ChangeType", - "documentation":"

The type of entity that AWS CloudFormation changes. Currently, the only entity type is Resource.

" + "documentation":"

The type of entity that CloudFormation changes. Currently, the only entity type is Resource.

" }, "ResourceChange":{ "shape":"ResourceChange", - "documentation":"

A ResourceChange structure that describes the resource and action that AWS CloudFormation will perform.

" + "documentation":"

A ResourceChange structure that describes the resource and action that CloudFormation will perform.

" } }, - "documentation":"

The Change structure describes the changes AWS CloudFormation will perform if you execute the change set.

" + "documentation":"

The Change structure describes the changes CloudFormation will perform if you execute the change set.

" }, "ChangeAction":{ "type":"string", @@ -1352,7 +1374,7 @@ }, "ExecutionStatus":{ "shape":"ExecutionStatus", - "documentation":"

If the change set execution status is AVAILABLE, you can execute the change set. If you can’t execute the change set, the status indicates why. For example, a change set might be in an UNAVAILABLE state because AWS CloudFormation is still creating it or in an OBSOLETE state because the stack was already updated.

" + "documentation":"

If the change set execution status is AVAILABLE, you can execute the change set. If you can’t execute the change set, the status indicates why. For example, a change set might be in an UNAVAILABLE state because CloudFormation is still creating it or in an OBSOLETE state because the stack was already updated.

" }, "Status":{ "shape":"ChangeSetStatus", @@ -1360,7 +1382,7 @@ }, "StatusReason":{ "shape":"ChangeSetStatusReason", - "documentation":"

A description of the change set's status. For example, if your change set is in the FAILED state, AWS CloudFormation shows the error message.

" + "documentation":"

A description of the change set's status. For example, if your change set is in the FAILED state, CloudFormation shows the error message.

" }, "CreationTime":{ "shape":"CreationTime", @@ -1444,15 +1466,15 @@ }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to roll back the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to roll back the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.

" }, "ResourcesToSkip":{ "shape":"ResourcesToSkip", - "documentation":"

A list of the logical IDs of the resources that AWS CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was cancelled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason.

Specify this property to skip rolling back resources that AWS CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. AWS CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable.

Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources.

To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED.

Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy.

" + "documentation":"

A list of the logical IDs of the resources that CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was cancelled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason.

Specify this property to skip rolling back resources that CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable.

Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources.

To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED.

Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to continue the rollback to a stack with the same name. You might retry ContinueUpdateRollback requests to ensure that AWS CloudFormation successfully received them.

" + "documentation":"

A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to continue the rollback to a stack with the same name. You might retry ContinueUpdateRollback requests to ensure that CloudFormation successfully received them.

" } }, "documentation":"

The input for the ContinueUpdateRollback action.

" @@ -1472,15 +1494,15 @@ "members":{ "StackName":{ "shape":"StackNameOrId", - "documentation":"

The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates the change set by comparing this stack's information with the information that you submit, such as a modified template or different parameter input values.

" + "documentation":"

The name or the unique ID of the stack for which you are creating a change set. CloudFormation generates the change set by comparing this stack's information with the information that you submit, such as a modified template or different parameter input values.

" }, "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

A structure that contains the body of the revised template, with a minimum length of 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation generates the change set by comparing this template with the template of the stack that you specified.

Conditional: You must specify only TemplateBody or TemplateURL.

" + "documentation":"

A structure that contains the body of the revised template, with a minimum length of 1 byte and a maximum length of 51,200 bytes. CloudFormation generates the change set by comparing this template with the template of the stack that you specified.

Conditional: You must specify only TemplateBody or TemplateURL.

" }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket or a Systems Manager document. AWS CloudFormation generates the change set by comparing this template with the stack that you specified.

Conditional: You must specify only TemplateBody or TemplateURL.

" + "documentation":"

The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket or a Systems Manager document. CloudFormation generates the change set by comparing this template with the stack that you specified.

Conditional: You must specify only TemplateBody or TemplateURL.

" }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -1492,27 +1514,27 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

    This capacity does not apply to creating change sets, and specifying it when creating change sets has no effect.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.

    For more information on macros, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    This capability does not apply to creating change sets, and specifying it when creating change sets has no effect.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.

    For more information on macros, see Using CloudFormation Macros to Perform Custom Processing on Templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", - "documentation":"

The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.

If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for AWS CloudFormation. For more information, see Controlling Access with AWS Identity and Access Management in the AWS CloudFormation User Guide.

" + "documentation":"

The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.

If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Controlling Access with Identity and Access Management in the CloudFormation User Guide.

" }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes when executing the change set. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes when executing the change set. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation uses this role for all future operations on the stack. As long as users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.

" }, "RollbackConfiguration":{ "shape":"RollbackConfiguration", - "documentation":"

The rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" + "documentation":"

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" }, "NotificationARNs":{ "shape":"NotificationARNs", - "documentation":"

The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that AWS CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list.

" + "documentation":"

The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list.

" }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 50 tags.

" + "documentation":"

Key-value pairs to associate with this stack. CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 50 tags.

" }, "ChangeSetName":{ "shape":"ChangeSetName", @@ -1520,7 +1542,7 @@ }, "ClientToken":{ "shape":"ClientToken", - "documentation":"

A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that AWS CloudFormation successfully received them.

" + "documentation":"

A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that CloudFormation successfully received them.

" }, "Description":{ "shape":"Description", @@ -1528,7 +1550,7 @@ }, "ChangeSetType":{ "shape":"ChangeSetType", - "documentation":"

The type of change set operation. To create a change set for a new stack, specify CREATE. To create a change set for an existing stack, specify UPDATE. To create a change set for an import operation, specify IMPORT.

If you create a change set for a new stack, AWS Cloudformation creates a stack with a unique stack ID, but no template or resources. The stack will be in the REVIEW_IN_PROGRESS state until you execute the change set.

By default, AWS CloudFormation specifies UPDATE. You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack.

" + "documentation":"

The type of change set operation. To create a change set for a new stack, specify CREATE. To create a change set for an existing stack, specify UPDATE. To create a change set for an import operation, specify IMPORT.

If you create a change set for a new stack, CloudFormation creates a stack with a unique stack ID, but no template or resources. The stack will be in the REVIEW_IN_PROGRESS state until you execute the change set.

By default, CloudFormation specifies UPDATE. You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack.

" }, "ResourcesToImport":{ "shape":"ResourcesToImport", @@ -1565,11 +1587,11 @@ }, "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to the Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" }, "Parameters":{ "shape":"Parameters", @@ -1581,7 +1603,7 @@ }, "RollbackConfiguration":{ "shape":"RollbackConfiguration", - "documentation":"

The rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" + "documentation":"

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" }, "TimeoutInMinutes":{ "shape":"TimeoutMinutes", @@ -1593,15 +1615,15 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.

    You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified.

    For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.

    You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

    For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", - "documentation":"

The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all AWS resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular AWS service), and AWS::service_name::resource_logical_ID (for a specific AWS resource).

If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.

" + "documentation":"

The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all Amazon Web Services resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular Amazon Web Services service), and AWS::service_name::resource_logical_ID (for a specific Amazon Web Services resource).

If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management.

" }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to create the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.

" }, "OnFailure":{ "shape":"OnFailure", @@ -1609,7 +1631,7 @@ }, "StackPolicyBody":{ "shape":"StackPolicyBody", - "documentation":"

Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" + "documentation":"

Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" }, "StackPolicyURL":{ "shape":"StackPolicyURL", @@ -1617,15 +1639,15 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.

" + "documentation":"

Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + "documentation":"

A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" }, "EnableTerminationProtection":{ "shape":"EnableTerminationProtection", - "documentation":"

Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the AWS CloudFormation User Guide. Termination protection is disabled on stacks by default.

For nested stacks, termination protection is set on the root stack and cannot be changed directly on the nested stack.

" + "documentation":"

Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is disabled on stacks by default.

For nested stacks, termination protection is set on the root stack and cannot be changed directly on the nested stack.

" } }, "documentation":"

The input for CreateStack action.

" @@ -1643,32 +1665,32 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of one or more AWS accounts that you want to create stack instances in the specified Region(s) for.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of one or more accounts that you want to create stack instances in the specified Region(s) for.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts for which to create stack instances in the specified Regions.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Service-managed permissions] The Organizations accounts for which to create stack instances in the specified Regions.

You can specify Accounts or DeploymentTargets, but not both.

" }, "Regions":{ "shape":"RegionList", - "documentation":"

The names of one or more Regions where you want to create stack instances using the specified AWS account(s).

" + "documentation":"

The names of one or more Regions where you want to create stack instances using the specified accounts.

" }, "ParameterOverrides":{ "shape":"Parameters", - "documentation":"

A list of stack set parameters whose values you want to override in the selected stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Regions. When specifying parameters and their values, be aware of how AWS CloudFormation sets parameter values during stack instance operations:

  • To override the current value for a parameter, include the parameter and specify its value.

  • To leave a parameter set to its present value, you can do one of the following:

    • Do not include the parameter in the list.

    • Include the parameter and specify UsePreviousValue as true. (You cannot specify both a value and set UsePreviousValue to true.)

  • To set all overridden parameters back to the values specified in the stack set, specify a parameter list but do not include any parameters.

  • To leave all parameters set to their present values, do not specify this property at all.

During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template.

" + "documentation":"

A list of stack set parameters whose values you want to override in the selected stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance operations:

  • To override the current value for a parameter, include the parameter and specify its value.

  • To leave an overridden parameter set to its present value, include the parameter and specify UsePreviousValue as true. (You cannot specify both a value and set UsePreviousValue to true.)

  • To set an overridden parameter back to the value specified in the stack set, specify a parameter list but do not include the parameter in the list.

  • To leave all parameters set to their present values, do not specify this property at all.

During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", - "documentation":"

Preferences for how AWS CloudFormation performs this stack set operation.

" + "documentation":"

Preferences for how CloudFormation performs this stack set operation.

" }, "OperationId":{ "shape":"ClientRequestToken", - "documentation":"

The unique identifier for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", + "documentation":"

The unique identifier for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", "idempotencyToken":true }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -1705,11 +1727,15 @@ }, "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + "documentation":"

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + }, + "StackId":{ + "shape":"StackId", + "documentation":"

The stack ID you are importing into a new stack set. Specify the Amazon Resource Name (ARN) of the stack.

" }, "Parameters":{ "shape":"Parameters", @@ -1717,35 +1743,35 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for AWS CloudFormation to create the stack set and related stack instances.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

    Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

    Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

" }, "Tags":{ "shape":"Tags", - "documentation":"

The key-value pairs to associate with this stack set and the stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the stacks. A maximum number of 50 tags can be specified.

If you specify tags as part of a CreateStackSet action, AWS CloudFormation checks to see if you have the required IAM permission to tag resources. If you don't, the entire CreateStackSet action fails with an access denied error, and the stack set is not created.

" + "documentation":"

The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates these tags to supported resources that are created in the stacks. A maximum number of 50 tags can be specified.

If you specify tags as part of a CreateStackSet action, CloudFormation checks to see if you have the required IAM permission to tag resources. If you don't, the entire CreateStackSet action fails with an access denied error, and the stack set is not created.

" }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the AWS CloudFormation User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", - "documentation":"

The name of the IAM execution role to use to create the stack set. If you do not specify an execution role, AWS CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.

Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets.

" + "documentation":"

The name of the IAM execution role to use to create the stack set. If you do not specify an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.

Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets.

" }, "PermissionModel":{ "shape":"PermissionModels", - "documentation":"

Describes how the IAM roles required for stack set operations are created. By default, SELF-MANAGED is specified.

" + "documentation":"

Describes how the IAM roles required for stack set operations are created. By default, SELF-MANAGED is specified.

" }, "AutoDeployment":{ "shape":"AutoDeployment", - "documentation":"

Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to the target organization or organizational unit (OU). Specify only if PermissionModel is SERVICE_MANAGED.

" + "documentation":"

Describes whether StackSets automatically deploys to Organizations accounts that are added to the target organization or organizational unit (OU). Specify only if PermissionModel is SERVICE_MANAGED.

" }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • To create a stack set with service-managed permissions while signed in to the management account, specify SELF.

  • To create a stack set with service-managed permissions while signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated admin in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

Stack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated administrators.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • To create a stack set with service-managed permissions while signed in to the management account, specify SELF.

  • To create a stack set with service-managed permissions while signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated admin in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

Stack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated administrators.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this CreateStackSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another stack set with the same name. You might retry CreateStackSet requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

", + "documentation":"

A unique identifier for this CreateStackSet request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create another stack set with the same name. You might retry CreateStackSet requests to ensure that CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

", "idempotencyToken":true } } @@ -1825,15 +1851,15 @@ }, "RetainResources":{ "shape":"RetainResources", - "documentation":"

For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. During deletion, AWS CloudFormation deletes the stack but does not delete the retained resources.

Retaining resources is useful when you cannot delete a resource, such as a non-empty S3 bucket, but you want to delete the stack.

" + "documentation":"

For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. During deletion, CloudFormation deletes the stack but does not delete the retained resources.

Retaining resources is useful when you cannot delete a resource, such as a non-empty S3 bucket, but you want to delete the stack.

" }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to delete the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf.

If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to delete the stack. CloudFormation uses the role's credentials to make calls on your behalf.

If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + "documentation":"

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "documentation":"

The input for DeleteStack action.

" @@ -1852,11 +1878,11 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of the AWS accounts that you want to delete stack instances for.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of the accounts that you want to delete stack instances for.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts from which to delete stack instances.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Service-managed permissions] The Organizations accounts from which to delete stack instances.

You can specify Accounts or DeploymentTargets, but not both.

" }, "Regions":{ "shape":"RegionList", @@ -1864,7 +1890,7 @@ }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", - "documentation":"

Preferences for how AWS CloudFormation performs this stack set operation.

" + "documentation":"

Preferences for how CloudFormation performs this stack set operation.

" }, "RetainStacks":{ "shape":"RetainStacks", @@ -1872,12 +1898,12 @@ }, "OperationId":{ "shape":"ClientRequestToken", - "documentation":"

The unique identifier for this stack set operation.

If you don't specify an operation ID, the SDK generates one automatically.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You can retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", + "documentation":"

The unique identifier for this stack set operation.

If you don't specify an operation ID, the SDK generates one automatically.

The operation ID also functions as an idempotency token, to ensure that CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You can retry stack set operation requests to ensure that CloudFormation successfully received them.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", "idempotencyToken":true }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -1900,7 +1926,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -1915,18 +1941,18 @@ "members":{ "Accounts":{ "shape":"AccountList", - "documentation":"

The names of one or more AWS accounts for which you want to deploy stack set updates.

" + "documentation":"

The names of one or more accounts for which you want to deploy stack set updates.

" }, "AccountsUrl":{ "shape":"AccountsUrl", - "documentation":"

Returns the value of the AccountsUrl property.

" + "documentation":"

Returns the value of the AccountsUrl property.

" }, "OrganizationalUnitIds":{ "shape":"OrganizationalUnitIdList", "documentation":"

The organization root ID or organizational unit (OU) IDs to which StackSets deploys.

" } }, - "documentation":"

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

" + "documentation":"

[Service-managed permissions] The Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

" }, "DeprecatedStatus":{ "type":"string", @@ -1976,7 +2002,7 @@ "members":{ "AccountLimits":{ "shape":"AccountLimitList", - "documentation":"

An account limit structure that contain a list of AWS CloudFormation account limits and their values.

" + "documentation":"

An account limit structure that contains a list of CloudFormation account limits and their values.

" }, "NextToken":{ "shape":"NextToken", @@ -2037,7 +2063,7 @@ }, "ExecutionStatus":{ "shape":"ExecutionStatus", - "documentation":"

If the change set execution status is AVAILABLE, you can execute the change set. If you can’t execute the change set, the status indicates why. For example, a change set might be in an UNAVAILABLE state because AWS CloudFormation is still creating it or in an OBSOLETE state because the stack was already updated.

" + "documentation":"

If the change set execution status is AVAILABLE, you can execute the change set. If you can’t execute the change set, the status indicates why. For example, a change set might be in an UNAVAILABLE state because CloudFormation is still creating it or in an OBSOLETE state because the stack was already updated.

" }, "Status":{ "shape":"ChangeSetStatus", @@ -2045,7 +2071,7 @@ }, "StatusReason":{ "shape":"ChangeSetStatusReason", - "documentation":"

A description of the change set's status. For example, if your attempt to create a change set failed, AWS CloudFormation shows the error message.

" + "documentation":"

A description of the change set's status. For example, if your attempt to create a change set failed, CloudFormation shows the error message.

" }, "NotificationARNs":{ "shape":"NotificationARNs", @@ -2053,7 +2079,7 @@ }, "RollbackConfiguration":{ "shape":"RollbackConfiguration", - "documentation":"

The rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" + "documentation":"

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" }, "Capabilities":{ "shape":"Capabilities", @@ -2065,7 +2091,7 @@ }, "Changes":{ "shape":"Changes", - "documentation":"

A list of Change structures that describes the resources AWS CloudFormation changes if you execute the change set.

" + "documentation":"

A list of Change structures that describes the resources CloudFormation changes if you execute the change set.

" }, "NextToken":{ "shape":"NextToken", @@ -2122,7 +2148,7 @@ "members":{ "StackDriftDetectionId":{ "shape":"StackDriftDetectionId", - "documentation":"

The ID of the drift detection results of this operation.

AWS CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number of drift results AWS CloudFormation retains for any given stack, and for how long, may vary.

" + "documentation":"

The ID of the drift detection results of this operation.

CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number of drift results CloudFormation retains for any given stack, and for how long, may vary.

" } } }, @@ -2141,15 +2167,15 @@ }, "StackDriftDetectionId":{ "shape":"StackDriftDetectionId", - "documentation":"

The ID of the drift detection results of this operation.

AWS CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number of reports AWS CloudFormation retains for any given stack, and for how long, may vary.

" + "documentation":"

The ID of the drift detection results of this operation.

CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number of reports CloudFormation retains for any given stack, and for how long, may vary.

" }, "StackDriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

Status of the stack's actual configuration compared to its expected configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

  • NOT_CHECKED: AWS CloudFormation has not checked if the stack differs from its expected template configuration.

  • IN_SYNC: The stack's actual configuration matches its expected template configuration.

  • UNKNOWN: This value is reserved for future use.

" + "documentation":"

Status of the stack's actual configuration compared to its expected configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

  • NOT_CHECKED: CloudFormation has not checked if the stack differs from its expected template configuration.

  • IN_SYNC: The stack's actual configuration matches its expected template configuration.

  • UNKNOWN: This value is reserved for future use.

" }, "DetectionStatus":{ "shape":"StackDriftDetectionStatus", - "documentation":"

The status of the stack drift detection operation.

  • DETECTION_COMPLETE: The stack drift detection operation has successfully completed for all resources in the stack that support drift detection. (Resources that do not currently support stack detection remain unchecked.)

    If you specified logical resource IDs for AWS CloudFormation to use as a filter for the stack drift detection operation, only the resources with those logical IDs are checked for drift.

  • DETECTION_FAILED: The stack drift detection operation has failed for at least one resource in the stack. Results will be available for resources on which AWS CloudFormation successfully completed drift detection.

  • DETECTION_IN_PROGRESS: The stack drift detection operation is currently in progress.

" + "documentation":"

The status of the stack drift detection operation.

  • DETECTION_COMPLETE: The stack drift detection operation has successfully completed for all resources in the stack that support drift detection. (Resources that do not currently support stack detection remain unchecked.)

    If you specified logical resource IDs for CloudFormation to use as a filter for the stack drift detection operation, only the resources with those logical IDs are checked for drift.

  • DETECTION_FAILED: The stack drift detection operation has failed for at least one resource in the stack. Results will be available for resources on which CloudFormation successfully completed drift detection.

  • DETECTION_IN_PROGRESS: The stack drift detection operation is currently in progress.

" }, "DetectionStatusReason":{ "shape":"StackDriftDetectionStatusReason", @@ -2207,7 +2233,7 @@ }, "StackInstanceAccount":{ "shape":"Account", - "documentation":"

The ID of an AWS account that's associated with this stack instance.

" + "documentation":"

The ID of an account that's associated with this stack instance.

" }, "StackInstanceRegion":{ "shape":"Region", @@ -2215,7 +2241,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -2238,7 +2264,7 @@ }, "StackResourceDriftStatusFilters":{ "shape":"StackResourceDriftStatusFilters", - "documentation":"

The resource drift status values to use as filters for the resource drift results returned.

  • DELETED: The resource differs from its expected template configuration in that the resource has been deleted.

  • MODIFIED: One or more resource properties differ from their expected template values.

  • IN_SYNC: The resources's actual configuration matches its expected template configuration.

  • NOT_CHECKED: AWS CloudFormation does not currently return this value.

" + "documentation":"

The resource drift status values to use as filters for the resource drift results returned.

  • DELETED: The resource differs from its expected template configuration in that the resource has been deleted.

  • MODIFIED: One or more resource properties differ from their expected template values.

  • IN_SYNC: The resource's actual configuration matches its expected template configuration.

  • NOT_CHECKED: CloudFormation does not currently return this value.

" }, "NextToken":{ "shape":"NextToken", @@ -2256,7 +2282,7 @@ "members":{ "StackResourceDrifts":{ "shape":"StackResourceDrifts", - "documentation":"

Drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where AWS CloudFormation detects drift.

For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that have not yet been checked for drift are not included. Resources that do not currently support drift detection are not checked, and so not included. For a list of resources that support drift detection, see Resources that Support Drift Detection.

" + "documentation":"

Drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects drift.

For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that have not yet been checked for drift are not included. Resources that do not currently support drift detection are not checked, and so not included. For a list of resources that support drift detection, see Resources that Support Drift Detection.

" }, "NextToken":{ "shape":"NextToken", @@ -2305,7 +2331,7 @@ }, "PhysicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId corresponds to the InstanceId. You can pass the EC2 InstanceId to DescribeStackResources to find which stack the instance belongs to and what other resources are part of the stack.

Required: Conditional. If you do not specify PhysicalResourceId, you must specify StackName.

Default: There is no default value.

" + "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.

For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId corresponds to the InstanceId. You can pass the EC2 InstanceId to DescribeStackResources to find which stack the instance belongs to and what other resources are part of the stack.

Required: Conditional. If you do not specify PhysicalResourceId, you must specify StackName.

Default: There is no default value.

" } }, "documentation":"

The input for DescribeStackResources action.

" @@ -2330,7 +2356,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -2351,7 +2377,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -2471,7 +2497,7 @@ }, "ProvisioningType":{ "shape":"ProvisioningType", - "documentation":"

For resource type extensions, the provisioning behavior of the resource type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

  • FULLY_MUTABLE: The resource type includes an update handler to process updates to the type during stack update operations.

  • IMMUTABLE: The resource type does not include an update handler, so the type cannot be updated and must instead be replaced during stack update operations.

  • NON_PROVISIONABLE: The resource type does not include all of the following handlers, and therefore cannot actually be provisioned.

    • create

    • read

    • delete

" + "documentation":"

For resource type extensions, the provisioning behavior of the resource type. CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

  • FULLY_MUTABLE: The resource type includes an update handler to process updates to the type during stack update operations.

  • IMMUTABLE: The resource type does not include an update handler, so the type cannot be updated and must instead be replaced during stack update operations.

  • NON_PROVISIONABLE: The resource type does not include all of the following handlers, and therefore cannot actually be provisioned.

    • create

    • read

    • delete

" }, "DeprecatedStatus":{ "shape":"DeprecatedStatus", @@ -2487,11 +2513,11 @@ }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role used to register the extension. This applies only to private extensions you have registered in your account. For more information, see RegisterType.

If the registered extension calls any AWS APIs, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your extension with the appropriate credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role used to register the extension. This applies only to private extensions you have registered in your account. For more information, see RegisterType.

If the registered extension calls any Amazon Web Services APIs, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your extension with the appropriate credentials.

" }, "Visibility":{ "shape":"Visibility", - "documentation":"

The scope at which the extension is visible and usable in CloudFormation operations.

Valid values include:

  • PRIVATE: The extension is only visible and usable within the account in which it is registered. AWS CloudFormation marks any extensions you register as PRIVATE.

  • PUBLIC: The extension is publically visible and usable within any Amazon account.

" + "documentation":"

The scope at which the extension is visible and usable in CloudFormation operations.

Valid values include:

  • PRIVATE: The extension is only visible and usable within the account in which it is registered. CloudFormation marks any extensions you register as PRIVATE.

  • PUBLIC: The extension is publicly visible and usable within any Amazon account.

" }, "SourceUrl":{ "shape":"OptionalSecureUrl", @@ -2539,7 +2565,7 @@ }, "AutoUpdate":{ "shape":"AutoUpdate", - "documentation":"

Whether CloudFormation automatically updates the extension in this account and region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated. For more information, see Activating public extensions for use in your account in the AWS CloudFormation User Guide.

" + "documentation":"

Whether CloudFormation automatically updates the extension in this account and region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated. For more information, see Activating public extensions for use in your account in the CloudFormation User Guide.

" } } }, @@ -2599,7 +2625,7 @@ "members":{ "StackDriftDetectionId":{ "shape":"StackDriftDetectionId", - "documentation":"

The ID of the drift detection results of this operation.

AWS CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number of drift results AWS CloudFormation retains for any given stack, and for how long, may vary.

" + "documentation":"

The ID of the drift detection results of this operation.

CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number of drift results CloudFormation retains for any given stack, and for how long, may vary.

" } } }, @@ -2646,7 +2672,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -2688,11 +2714,11 @@ "members":{ "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used.

" + "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the CloudFormation User Guide.)

Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used.

" }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" + "documentation":"

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" }, "Parameters":{ "shape":"Parameters", @@ -2706,7 +2732,7 @@ "members":{ "Url":{ "shape":"Url", - "documentation":"

An AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

" + "documentation":"

An Amazon Web Services Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

" } }, "documentation":"

The output for an EstimateTemplateCost action.

" @@ -2733,7 +2759,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this ExecuteChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet requests to ensure that AWS CloudFormation successfully received them.

" + "documentation":"

A unique identifier for this ExecuteChangeSet request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet requests to ensure that CloudFormation successfully received them.

" } }, "documentation":"

The input for the ExecuteChangeSet action.

" @@ -2814,7 +2840,7 @@ "members":{ "StackPolicyBody":{ "shape":"StackPolicyBody", - "documentation":"

Structure containing the stack policy body. (For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide.)

" + "documentation":"

Structure containing the stack policy body. (For more information, go to Prevent Updates to Stack Resources in the CloudFormation User Guide.)

" } }, "documentation":"

The output for the GetStackPolicy action.

" @@ -2828,11 +2854,11 @@ }, "ChangeSetName":{ "shape":"ChangeSetNameOrId", - "documentation":"

The name or Amazon Resource Name (ARN) of a change set for which AWS CloudFormation returns the associated template. If you specify a name, you must also specify the StackName.

" + "documentation":"

The name or Amazon Resource Name (ARN) of a change set for which CloudFormation returns the associated template. If you specify a name, you must also specify the StackName.

" }, "TemplateStage":{ "shape":"TemplateStage", - "documentation":"

For templates that include transforms, the stage of the template that AWS CloudFormation returns. To get the user-submitted template, specify Original. To get the template after AWS CloudFormation has processed all transforms, specify Processed.

If the template doesn't include transforms, Original and Processed return the same template. By default, AWS CloudFormation specifies Processed.

" + "documentation":"

For templates that include transforms, the stage of the template that CloudFormation returns. To get the user-submitted template, specify Original. To get the template after CloudFormation has processed all transforms, specify Processed.

If the template doesn't include transforms, Original and Processed return the same template. By default, CloudFormation specifies Processed.

" } }, "documentation":"

The input for a GetTemplate action.

" @@ -2842,11 +2868,11 @@ "members":{ "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

Structure containing the template body. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

AWS CloudFormation returns the same template that was used when the stack was created.

" + "documentation":"

Structure containing the template body. (For more information, go to Template Anatomy in the CloudFormation User Guide.)

CloudFormation returns the same template that was used when the stack was created.

" }, "StagesAvailable":{ "shape":"StageList", - "documentation":"

The stage of the template that you can retrieve. For stacks, the Original and Processed templates are always available. For change sets, the Original template is always available. After AWS CloudFormation finishes creating the change set, the Processed template becomes available.

" + "documentation":"

The stage of the template that you can retrieve. For stacks, the Original and Processed templates are always available. For change sets, the Original template is always available. After CloudFormation finishes creating the change set, the Processed template becomes available.

" } }, "documentation":"

The output for GetTemplate action.

" @@ -2856,11 +2882,11 @@ "members":{ "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

" + "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

" }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

" + "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

" }, "StackName":{ "shape":"StackNameOrId", @@ -2872,7 +2898,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } }, "documentation":"

The input for the GetTemplateSummary action.

" @@ -2890,7 +2916,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

" + "documentation":"

The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in CloudFormation Templates.

" }, "CapabilitiesReason":{ "shape":"CapabilitiesReason", @@ -2902,7 +2928,7 @@ }, "Version":{ "shape":"Version", - "documentation":"

The AWS template format version, which identifies the capabilities of the template.

" + "documentation":"

The Amazon Web Services template format version, which identifies the capabilities of the template.

" }, "Metadata":{ "shape":"Metadata", @@ -2947,6 +2973,42 @@ "Bitbucket" ] }, + "ImportStacksToStackSetInput":{ + "type":"structure", + "required":[ + "StackSetName", + "StackIds" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetNameOrId", + "documentation":"

The name of the stack set. The name must be unique in the Region where you create your stack set.

" + }, + "StackIds":{ + "shape":"StackIdList", + "documentation":"

The IDs of the stacks you are importing into a stack set. You can import up to 10 stacks per stack set at a time.

" + }, + "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

A unique, user-defined identifier for the stack set operation.

", + "idempotencyToken":true + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • For service managed stack sets, specify DELEGATED_ADMIN.

" + } + } + }, + "ImportStacksToStackSetOutput":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for the stack set operation.

" + } + } + }, "Imports":{ "type":"list", "member":{"shape":"StackName"} @@ -3017,7 +3079,7 @@ "type":"structure", "members":{ }, - "documentation":"

The quota for the resource has already been reached.

For information on resource and stack limitations, see Limits in the AWS CloudFormation User Guide.

", + "documentation":"

The quota for the resource has already been reached.

For information on resource and stack limitations, see Limits in the CloudFormation User Guide.

", "error":{ "code":"LimitExceededException", "httpStatusCode":400, @@ -3084,7 +3146,7 @@ "members":{ "ExportName":{ "shape":"ExportName", - "documentation":"

The name of the exported output value. AWS CloudFormation returns the stack names that are importing this value.

" + "documentation":"

The name of the exported output value. CloudFormation returns the stack names that are importing this value.

" }, "NextToken":{ "shape":"NextToken", @@ -3127,7 +3189,7 @@ }, "StackInstanceAccount":{ "shape":"Account", - "documentation":"

The name of the AWS account that you want to list stack instances for.

" + "documentation":"

The name of the account that you want to list stack instances for.

" }, "StackInstanceRegion":{ "shape":"Region", @@ -3135,7 +3197,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -3206,7 +3268,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -3241,7 +3303,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -3275,7 +3337,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -3417,7 +3479,7 @@ }, "ProvisioningType":{ "shape":"ProvisioningType", - "documentation":"

For resource types, the provisioning behavior of the resource type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

  • FULLY_MUTABLE: The resource type includes an update handler to process updates to the type during stack update operations.

  • IMMUTABLE: The resource type does not include an update handler, so the type cannot be updated and must instead be replaced during stack update operations.

  • NON_PROVISIONABLE: The resource type does not include create, read, and delete handlers, and therefore cannot actually be provisioned.

The default is FULLY_MUTABLE.

" + "documentation":"

For resource types, the provisioning behavior of the resource type. CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

  • FULLY_MUTABLE: The resource type includes an update handler to process updates to the type during stack update operations.

  • IMMUTABLE: The resource type does not include an update handler, so the type cannot be updated and must instead be replaced during stack update operations.

  • NON_PROVISIONABLE: The resource type does not include create, read, and delete handlers, and therefore cannot actually be provisioned.

The default is FULLY_MUTABLE.

" }, "DeprecatedStatus":{ "shape":"DeprecatedStatus", @@ -3659,7 +3721,7 @@ "members":{ "ParameterKey":{ "shape":"ParameterKey", - "documentation":"

The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.

" + "documentation":"

The key associated with the parameter. If you don't specify a key and value for a particular parameter, CloudFormation uses the default value that is specified in your template.

" }, "ParameterValue":{ "shape":"ParameterValue", @@ -3684,7 +3746,7 @@ "documentation":"

A list of values that are permitted for a parameter.

" } }, - "documentation":"

A set of criteria that AWS CloudFormation uses to validate parameter values. Although other constraints might be defined in the stack template, AWS CloudFormation returns only the AllowedValues property.

" + "documentation":"

A set of criteria that CloudFormation uses to validate parameter values. Although other constraints might be defined in the stack template, CloudFormation returns only the AllowedValues property.

" }, "ParameterDeclaration":{ "type":"structure", @@ -3703,7 +3765,7 @@ }, "NoEcho":{ "shape":"NoEcho", - "documentation":"

Flag that indicates whether the parameter value is shown as plain text in logs and in the AWS Management Console.

" + "documentation":"

Flag that indicates whether the parameter value is shown as plain text in logs and in the Management Console.

" }, "Description":{ "shape":"Description", @@ -3711,7 +3773,7 @@ }, "ParameterConstraints":{ "shape":"ParameterConstraints", - "documentation":"

The criteria that AWS CloudFormation uses to validate parameter values.

" + "documentation":"

The criteria that CloudFormation uses to validate parameter values.

" } }, "documentation":"

The ParameterDeclaration data type.

" @@ -3756,7 +3818,7 @@ "documentation":"

The resource context value.

" } }, - "documentation":"

Context information that enables AWS CloudFormation to uniquely identify a resource. AWS CloudFormation uses context key-value pairs in cases where a resource's logical and physical IDs are not enough to uniquely identify that resource. Each context key-value pair specifies a resource that contains the targeted resource.

" + "documentation":"

Context information that enables CloudFormation to uniquely identify a resource. CloudFormation uses context key-value pairs in cases where a resource's logical and physical IDs are not enough to uniquely identify that resource. Each context key-value pair specifies a resource that contains the targeted resource.

" }, "PrivateTypeArn":{ "type":"string", @@ -3829,7 +3891,7 @@ }, "PublicVersionNumber":{ "shape":"PublicVersionNumber", - "documentation":"

The version number to assign to this version of the extension.

Use the following format, and adhere to semantic versioning when assigning a version number to your extension:

MAJOR.MINOR.PATCH

For more information, see Semantic Versioning 2.0.0.

If you do not specify a version number, CloudFormation increments the version number by one minor version release.

" + "documentation":"

The version number to assign to this version of the extension.

Use the following format, and adhere to semantic versioning when assigning a version number to your extension:

MAJOR.MINOR.PATCH

For more information, see Semantic Versioning 2.0.0.

If you do not specify a version number, CloudFormation increments the version number by one minor version release.

The first time you publish a type, CloudFormation sets the version number to 1.0.0, regardless of the value you specify.

" } } }, @@ -3929,7 +3991,7 @@ "members":{ "AcceptTermsAndConditions":{ "shape":"AcceptTermsAndConditions", - "documentation":"

Whether you accept the terms and conditions for publishing extensions in the CloudFormation registry. You must accept the terms and conditions in order to register to publish public extensions to the CloudFormation registry.

The default is false.

" + "documentation":"

Whether you accept the Terms and Conditions for publishing extensions in the CloudFormation registry. You must accept the Terms and Conditions in order to register to publish public extensions to the CloudFormation registry.

The default is false.

" }, "ConnectionArn":{ "shape":"ConnectionArn", @@ -3963,7 +4025,7 @@ }, "SchemaHandlerPackage":{ "shape":"S3Url", - "documentation":"

A url to the S3 bucket containing the extension project package that contains the neccessary files for the extension you want to register.

For information on generating a schema handler package for the extension you want to register, see submit in the CloudFormation CLI User Guide.

The user registering the extension must be able to access the package in the S3 bucket. That is, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the AWS Identity and Access Management User Guide.

" + "documentation":"

A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register.

For information on generating a schema handler package for the extension you want to register, see submit in the CloudFormation CLI User Guide.

The user registering the extension must be able to access the package in the S3 bucket. That is, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide.

" }, "LoggingConfig":{ "shape":"LoggingConfig", @@ -3971,7 +4033,7 @@ }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension.

For CloudFormation to assume the specified execution role, the role must contain a trust relationship with the CloudFormation service principle (resources.cloudformation.amazonaws.com). For more information on adding trust relationships, see Modifying a role trust policy in the AWS Identity and Access Management User Guide.

If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension.

For CloudFormation to assume the specified execution role, the role must contain a trust relationship with the CloudFormation service principal (resources.cloudformation.amazonaws.com). For more information on adding trust relationships, see Modifying a role trust policy in the Identity and Access Management User Guide.

If your extension calls Amazon Web Services APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.

" }, "ClientRequestToken":{ "shape":"RequestToken", @@ -4047,7 +4109,7 @@ "documentation":"

A list of the major versions of the extension type that the macro supports.

" } }, - "documentation":"

For extensions that are modules, a public third-party extension that must be activated in your account in order for the module itself to be activated.

For more information, see Activating public modules for use in your account in the AWS CloudFormation User Guide.

" + "documentation":"

For extensions that are modules, a public third-party extension that must be activated in your account in order for the module itself to be activated.

For more information, see Activating public modules for use in your account in the CloudFormation User Guide.

" }, "RequiredActivatedTypes":{ "type":"list", @@ -4077,7 +4139,7 @@ "members":{ "Action":{ "shape":"ChangeAction", - "documentation":"

The action that AWS CloudFormation takes on the resource, such as Add (adds a new resource), Modify (changes a resource), Remove (deletes a resource), Import (imports a resource), or Dynamic (exact action for the resource cannot be determined).

" + "documentation":"

The action that CloudFormation takes on the resource, such as Add (adds a new resource), Modify (changes a resource), Remove (deletes a resource), Import (imports a resource), or Dynamic (exact action for the resource cannot be determined).

" }, "LogicalResourceId":{ "shape":"LogicalResourceId", @@ -4089,11 +4151,11 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of AWS CloudFormation resource, such as AWS::S3::Bucket.

" + "documentation":"

The type of CloudFormation resource, such as AWS::S3::Bucket.

" }, "Replacement":{ "shape":"Replacement", - "documentation":"

For the Modify action, indicates whether AWS CloudFormation will replace the resource by creating a new one and deleting the old one. This value depends on the value of the RequiresRecreation property in the ResourceTargetDefinition structure. For example, if the RequiresRecreation field is Always and the Evaluation field is Static, Replacement is True. If the RequiresRecreation field is Always and the Evaluation field is Dynamic, Replacement is Conditionally.

If you have multiple changes with different RequiresRecreation values, the Replacement value depends on the change with the most impact. A RequiresRecreation value of Always has the most impact, followed by Conditionally, and then Never.

" + "documentation":"

For the Modify action, indicates whether CloudFormation will replace the resource by creating a new one and deleting the old one. This value depends on the value of the RequiresRecreation property in the ResourceTargetDefinition structure. For example, if the RequiresRecreation field is Always and the Evaluation field is Static, Replacement is True. If the RequiresRecreation field is Always and the Evaluation field is Dynamic, Replacement is Conditionally.

If you have multiple changes with different RequiresRecreation values, the Replacement value depends on the change with the most impact. A RequiresRecreation value of Always has the most impact, followed by Conditionally, and then Never.

" }, "Scope":{ "shape":"Scope", @@ -4101,7 +4163,7 @@ }, "Details":{ "shape":"ResourceChangeDetails", - "documentation":"

For the Modify action, a list of ResourceChangeDetail structures that describes the changes that AWS CloudFormation will make to the resource.

" + "documentation":"

For the Modify action, a list of ResourceChangeDetail structures that describes the changes that CloudFormation will make to the resource.

" }, "ChangeSetId":{ "shape":"ChangeSetId", @@ -4112,29 +4174,29 @@ "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" } }, - "documentation":"

The ResourceChange structure describes the resource and the action that AWS CloudFormation will perform on it if you execute this change set.

" + "documentation":"

The ResourceChange structure describes the resource and the action that CloudFormation will perform on it if you execute this change set.

" }, "ResourceChangeDetail":{ "type":"structure", "members":{ "Target":{ "shape":"ResourceTargetDefinition", - "documentation":"

A ResourceTargetDefinition structure that describes the field that AWS CloudFormation will change and whether the resource will be recreated.

" + "documentation":"

A ResourceTargetDefinition structure that describes the field that CloudFormation will change and whether the resource will be recreated.

" }, "Evaluation":{ "shape":"EvaluationType", - "documentation":"

Indicates whether AWS CloudFormation can determine the target value, and whether the target value will change before you execute a change set.

For Static evaluations, AWS CloudFormation can determine that the target value will change, and its value. For example, if you directly modify the InstanceType property of an EC2 instance, AWS CloudFormation knows that this property value will change, and its value, so this is a Static evaluation.

For Dynamic evaluations, cannot determine the target value because it depends on the result of an intrinsic function, such as a Ref or Fn::GetAtt intrinsic function, when the stack is updated. For example, if your template includes a reference to a resource that is conditionally recreated, the value of the reference (the physical ID of the resource) might change, depending on if the resource is recreated. If the resource is recreated, it will have a new physical ID, so all references to that resource will also be updated.

" + "documentation":"

Indicates whether CloudFormation can determine the target value, and whether the target value will change before you execute a change set.

For Static evaluations, CloudFormation can determine that the target value will change, and its value. For example, if you directly modify the InstanceType property of an EC2 instance, CloudFormation knows that this property value will change, and its value, so this is a Static evaluation.

For Dynamic evaluations, CloudFormation cannot determine the target value because it depends on the result of an intrinsic function, such as a Ref or Fn::GetAtt intrinsic function, when the stack is updated. For example, if your template includes a reference to a resource that is conditionally recreated, the value of the reference (the physical ID of the resource) might change, depending on if the resource is recreated. If the resource is recreated, it will have a new physical ID, so all references to that resource will also be updated.

" }, "ChangeSource":{ "shape":"ChangeSource", - "documentation":"

The group to which the CausingEntity value belongs. There are five entity groups:

  • ResourceReference entities are Ref intrinsic functions that refer to resources in the template, such as { \"Ref\" : \"MyEC2InstanceResource\" }.

  • ParameterReference entities are Ref intrinsic functions that get template parameter values, such as { \"Ref\" : \"MyPasswordParameter\" }.

  • ResourceAttribute entities are Fn::GetAtt intrinsic functions that get resource attribute values, such as { \"Fn::GetAtt\" : [ \"MyEC2InstanceResource\", \"PublicDnsName\" ] }.

  • DirectModification entities are changes that are made directly to the template.

  • Automatic entities are AWS::CloudFormation::Stack resource types, which are also known as nested stacks. If you made no changes to the AWS::CloudFormation::Stack resource, AWS CloudFormation sets the ChangeSource to Automatic because the nested stack's template might have changed. Changes to a nested stack's template aren't visible to AWS CloudFormation until you run an update on the parent stack.

" + "documentation":"

The group to which the CausingEntity value belongs. There are five entity groups:

  • ResourceReference entities are Ref intrinsic functions that refer to resources in the template, such as { \"Ref\" : \"MyEC2InstanceResource\" }.

  • ParameterReference entities are Ref intrinsic functions that get template parameter values, such as { \"Ref\" : \"MyPasswordParameter\" }.

  • ResourceAttribute entities are Fn::GetAtt intrinsic functions that get resource attribute values, such as { \"Fn::GetAtt\" : [ \"MyEC2InstanceResource\", \"PublicDnsName\" ] }.

  • DirectModification entities are changes that are made directly to the template.

  • Automatic entities are AWS::CloudFormation::Stack resource types, which are also known as nested stacks. If you made no changes to the AWS::CloudFormation::Stack resource, CloudFormation sets the ChangeSource to Automatic because the nested stack's template might have changed. Changes to a nested stack's template aren't visible to CloudFormation until you run an update on the parent stack.

" }, "CausingEntity":{ "shape":"CausingEntity", "documentation":"

The identity of the entity that triggered this change. This entity is a member of the group that is specified by the ChangeSource field. For example, if you modified the value of the KeyPairName parameter, the CausingEntity is the name of the parameter (KeyPairName).

If the ChangeSource value is DirectModification, no value is given for CausingEntity.

" } }, - "documentation":"

For a resource with Modify as the action, the ResourceChange structure describes the changes AWS CloudFormation will make to that resource.

" + "documentation":"

For a resource with Modify as the action, the ResourceChange structure describes the changes CloudFormation will make to that resource.

" }, "ResourceChangeDetails":{ "type":"list", @@ -4236,10 +4298,10 @@ }, "RequiresRecreation":{ "shape":"RequiresRecreation", - "documentation":"

If the Attribute value is Properties, indicates whether a change to this property causes the resource to be recreated. The value can be Never, Always, or Conditionally. To determine the conditions for a Conditionally recreation, see the update behavior for that property in the AWS CloudFormation User Guide.

" + "documentation":"

If the Attribute value is Properties, indicates whether a change to this property causes the resource to be recreated. The value can be Never, Always, or Conditionally. To determine the conditions for a Conditionally recreation, see the update behavior for that property in the CloudFormation User Guide.

" } }, - "documentation":"

The field that AWS CloudFormation will change, such as the name of a resource's property, and whether the resource will be recreated.

" + "documentation":"

The field that CloudFormation will change, such as the name of a resource's property, and whether the resource will be recreated.

" }, "ResourceToImport":{ "type":"structure", @@ -4251,7 +4313,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to import into your stack, such as AWS::S3::Bucket. For a list of supported resource types, see Resources that support import operations in the AWS CloudFormation User Guide.

" + "documentation":"

The type of resource to import into your stack, such as AWS::S3::Bucket. For a list of supported resource types, see Resources that support import operations in the CloudFormation User Guide.

" }, "LogicalResourceId":{ "shape":"LogicalResourceId", @@ -4309,14 +4371,14 @@ "members":{ "RollbackTriggers":{ "shape":"RollbackTriggers", - "documentation":"

The triggers to monitor during stack creation or update actions.

By default, AWS CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:

  • To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.

  • To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update). Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.

  • To remove all currently specified triggers, specify an empty list for this parameter.

If a specified trigger is missing, the entire stack operation fails and is rolled back.

" + "documentation":"

The triggers to monitor during stack creation or update actions.

By default, CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:

  • To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.

  • To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update). Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.

  • To remove all currently specified triggers, specify an empty list for this parameter.

If a specified trigger is missing, the entire stack operation fails and is rolled back.

" }, "MonitoringTimeInMinutes":{ "shape":"MonitoringTimeInMinutes", "documentation":"

The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.

The default is 0 minutes.

If you specify a monitoring period but do not specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack, for example) as necessary.

If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.

" } }, - "documentation":"

Structure containing the rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

Rollback triggers enable you to have AWS CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.

" + "documentation":"

Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.

" }, "RollbackTrigger":{ "type":"structure", @@ -4334,7 +4396,7 @@ "documentation":"

The resource type of the rollback trigger. Currently, AWS::CloudWatch::Alarm is the only supported resource type.

" } }, - "documentation":"

A rollback trigger AWS CloudFormation monitors during creation and updating of stacks. If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.

" + "documentation":"

A rollback trigger CloudFormation monitors during creation and updating of stacks. If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.

" }, "RollbackTriggers":{ "type":"list", @@ -4366,7 +4428,7 @@ }, "StackPolicyBody":{ "shape":"StackPolicyBody", - "documentation":"

Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" + "documentation":"

Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" }, "StackPolicyURL":{ "shape":"StackPolicyURL", @@ -4459,7 +4521,7 @@ }, "Status":{ "shape":"ResourceSignalStatus", - "documentation":"

The status of the signal, which is either success or failure. A failure signal causes AWS CloudFormation to immediately fail the stack creation or update.

" + "documentation":"

The status of the signal, which is either success or failure. A failure signal causes CloudFormation to immediately fail the stack creation or update.

" } }, "documentation":"

The input for the SignalResource action.

" @@ -4506,7 +4568,7 @@ }, "RollbackConfiguration":{ "shape":"RollbackConfiguration", - "documentation":"

The rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" + "documentation":"

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" }, "StackStatus":{ "shape":"StackStatus", @@ -4538,7 +4600,7 @@ }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that is associated with the stack. During a stack operation, AWS CloudFormation uses this role's credentials to make calls on your behalf.

" + "documentation":"

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that is associated with the stack. During a stack operation, CloudFormation uses this role's credentials to make calls on your behalf.

" }, "Tags":{ "shape":"Tags", @@ -4546,15 +4608,15 @@ }, "EnableTerminationProtection":{ "shape":"EnableTerminationProtection", - "documentation":"

Whether termination protection is enabled for the stack.

For nested stacks, termination protection is set on the root stack and cannot be changed directly on the nested stack. For more information, see Protecting a Stack From Being Deleted in the AWS CloudFormation User Guide.

" + "documentation":"

Whether termination protection is enabled for the stack.

For nested stacks, termination protection is set on the root stack and cannot be changed directly on the nested stack. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide.

" }, "ParentId":{ "shape":"StackId", - "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the AWS CloudFormation User Guide.

" + "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" }, "RootId":{ "shape":"StackId", - "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the AWS CloudFormation User Guide.

" + "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" }, "DriftInformation":{ "shape":"StackDriftInformation", @@ -4583,7 +4645,7 @@ "members":{ "StackDriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

Status of the stack's actual configuration compared to its expected template configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

  • NOT_CHECKED: AWS CloudFormation has not checked if the stack differs from its expected template configuration.

  • IN_SYNC: The stack's actual configuration matches its expected template configuration.

  • UNKNOWN: This value is reserved for future use.

" + "documentation":"

Status of the stack's actual configuration compared to its expected template configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

  • NOT_CHECKED: CloudFormation has not checked if the stack differs from its expected template configuration.

  • IN_SYNC: The stack's actual configuration matches its expected template configuration.

  • UNKNOWN: This value is reserved for future use.

" }, "LastCheckTimestamp":{ "shape":"Timestamp", @@ -4598,7 +4660,7 @@ "members":{ "StackDriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

Status of the stack's actual configuration compared to its expected template configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

  • NOT_CHECKED: AWS CloudFormation has not checked if the stack differs from its expected template configuration.

  • IN_SYNC: The stack's actual configuration matches its expected template configuration.

  • UNKNOWN: This value is reserved for future use.

" + "documentation":"

Status of the stack's actual configuration compared to its expected template configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

  • NOT_CHECKED: CloudFormation has not checked if the stack differs from its expected template configuration.

  • IN_SYNC: The stack's actual configuration matches its expected template configuration.

  • UNKNOWN: This value is reserved for future use.

" }, "LastCheckTimestamp":{ "shape":"Timestamp", @@ -4647,7 +4709,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

" + "documentation":"

Type of resource. (For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.)

" }, "Timestamp":{ "shape":"Timestamp", @@ -4677,6 +4739,10 @@ "member":{"shape":"StackEvent"} }, "StackId":{"type":"string"}, + "StackIdList":{ + "type":"list", + "member":{"shape":"StackId"} + }, "StackInstance":{ "type":"structure", "members":{ @@ -4686,11 +4752,11 @@ }, "Region":{ "shape":"Region", - "documentation":"

The name of the AWS Region that the stack instance is associated with.

" + "documentation":"

The name of the Region that the stack instance is associated with.

" }, "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" + "documentation":"

[Self-managed permissions] The name of the account that the stack instance is associated with.

" }, "StackId":{ "shape":"StackId", @@ -4718,14 +4784,14 @@ }, "DriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

Status of the stack instance's actual configuration compared to the expected template and parameter configuration of the stack set to which it belongs.

  • DRIFTED: The stack differs from the expected template and parameter configuration of the stack set to which it belongs. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.

  • NOT_CHECKED: AWS CloudFormation has not checked if the stack instance differs from its expected stack set configuration.

  • IN_SYNC: The stack instance's actual configuration matches its expected stack set configuration.

  • UNKNOWN: This value is reserved for future use.

" + "documentation":"

Status of the stack instance's actual configuration compared to the expected template and parameter configuration of the stack set to which it belongs.

  • DRIFTED: The stack differs from the expected template and parameter configuration of the stack set to which it belongs. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.

  • NOT_CHECKED: CloudFormation has not checked if the stack instance differs from its expected stack set configuration.

  • IN_SYNC: The stack instance's actual configuration matches its expected stack set configuration.

  • UNKNOWN: This value is reserved for future use.

" }, "LastDriftCheckTimestamp":{ "shape":"Timestamp", "documentation":"

Most recent time when CloudFormation performed a drift detection operation on the stack instance. This value will be NULL for any stack instance on which drift detection has not yet been performed.

" } }, - "documentation":"

An AWS CloudFormation stack, in a specific account and Region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given Region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, as well as the ID of the actual stack and the stack status.

" + "documentation":"

A CloudFormation stack, in a specific account and Region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given Region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, as well as the ID of the actual stack and the stack status.

" }, "StackInstanceComprehensiveStatus":{ "type":"structure", @@ -4809,11 +4875,11 @@ }, "Region":{ "shape":"Region", - "documentation":"

The name of the AWS Region that the stack instance is associated with.

" + "documentation":"

The name of the Region that the stack instance is associated with.

" }, "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" + "documentation":"

[Self-managed permissions] The name of the account that the stack instance is associated with.

" }, "StackId":{ "shape":"StackId", @@ -4837,7 +4903,7 @@ }, "DriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

Status of the stack instance's actual configuration compared to the expected template and parameter configuration of the stack set to which it belongs.

  • DRIFTED: The stack differs from the expected template and parameter configuration of the stack set to which it belongs. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.

  • NOT_CHECKED: AWS CloudFormation has not checked if the stack instance differs from its expected stack set configuration.

  • IN_SYNC: The stack instance's actual configuration matches its expected stack set configuration.

  • UNKNOWN: This value is reserved for future use.

" + "documentation":"

Status of the stack instance's actual configuration compared to the expected template and parameter configuration of the stack set to which it belongs.

  • DRIFTED: The stack differs from the expected template and parameter configuration of the stack set to which it belongs. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.

  • NOT_CHECKED: CloudFormation has not checked if the stack instance differs from its expected stack set configuration.

  • IN_SYNC: The stack instance's actual configuration matches its expected stack set configuration.

  • UNKNOWN: This value is reserved for future use.

" }, "LastDriftCheckTimestamp":{ "shape":"Timestamp", @@ -4852,6 +4918,18 @@ "min":1, "pattern":"([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)" }, + "StackNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified stack ARN doesn’t exist, or no stack corresponds to the ARN provided as input.

", + "error":{ + "code":"StackNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, "StackPolicyBody":{ "type":"string", "max":16384, @@ -4895,11 +4973,11 @@ }, "PhysicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

" + "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.

" }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

" + "documentation":"

Type of resource. (For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.)

" }, "Timestamp":{ "shape":"Timestamp", @@ -4951,11 +5029,11 @@ }, "PhysicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

" + "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.

" }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

" + "documentation":"

Type of resource. (For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.)

" }, "LastUpdatedTimestamp":{ "shape":"Timestamp", @@ -4975,7 +5053,7 @@ }, "Metadata":{ "shape":"Metadata", - "documentation":"

The content of the Metadata attribute declared for the resource. For more information, see Metadata Attribute in the AWS CloudFormation User Guide.

" + "documentation":"

The content of the Metadata attribute declared for the resource. For more information, see Metadata Attribute in the CloudFormation User Guide.

" }, "DriftInformation":{ "shape":"StackResourceDriftInformation", @@ -5008,11 +5086,11 @@ }, "PhysicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

" + "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.

" }, "PhysicalResourceIdContext":{ "shape":"PhysicalResourceIdContext", - "documentation":"

Context information that enables AWS CloudFormation to uniquely identify a resource. AWS CloudFormation uses context key-value pairs in cases where a resource's logical and physical IDs are not enough to uniquely identify that resource. Each context key-value pair specifies a unique resource that contains the targeted resource.

" + "documentation":"

Context information that enables CloudFormation to uniquely identify a resource. CloudFormation uses context key-value pairs in cases where a resource's logical and physical IDs are not enough to uniquely identify that resource. Each context key-value pair specifies a unique resource that contains the targeted resource.

" }, "ResourceType":{ "shape":"ResourceType", @@ -5032,18 +5110,18 @@ }, "StackResourceDriftStatus":{ "shape":"StackResourceDriftStatus", - "documentation":"

Status of the resource's actual configuration compared to its expected configuration

  • DELETED: The resource differs from its expected template configuration because the resource has been deleted.

  • MODIFIED: One or more resource properties differ from their expected values (as defined in the stack template and any values specified as template parameters).

  • IN_SYNC: The resource's actual configuration matches its expected template configuration.

  • NOT_CHECKED: AWS CloudFormation does not currently return this value.

" + "documentation":"

Status of the resource's actual configuration compared to its expected configuration

  • DELETED: The resource differs from its expected template configuration because the resource has been deleted.

  • MODIFIED: One or more resource properties differ from their expected values (as defined in the stack template and any values specified as template parameters).

  • IN_SYNC: The resource's actual configuration matches its expected template configuration.

  • NOT_CHECKED: CloudFormation does not currently return this value.

" }, "Timestamp":{ "shape":"Timestamp", - "documentation":"

Time at which AWS CloudFormation performed drift detection on the stack resource.

" + "documentation":"

Time at which CloudFormation performed drift detection on the stack resource.

" }, "ModuleInfo":{ "shape":"ModuleInfo", "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" } }, - "documentation":"

Contains the drift information for a resource that has been checked for drift. This includes actual and expected property values for resources in which AWS CloudFormation has detected drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Resources that do not currently support drift detection cannot be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

" + "documentation":"

Contains the drift information for a resource that has been checked for drift. This includes actual and expected property values for resources in which CloudFormation has detected drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Resources that do not currently support drift detection cannot be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

" }, "StackResourceDriftInformation":{ "type":"structure", @@ -5051,11 +5129,11 @@ "members":{ "StackResourceDriftStatus":{ "shape":"StackResourceDriftStatus", - "documentation":"

Status of the resource's actual configuration compared to its expected configuration

  • DELETED: The resource differs from its expected configuration in that it has been deleted.

  • MODIFIED: The resource differs from its expected configuration.

  • NOT_CHECKED: AWS CloudFormation has not checked if the resource differs from its expected configuration.

    Any resources that do not currently support drift detection have a status of NOT_CHECKED. For more information, see Resources that Support Drift Detection.

  • IN_SYNC: The resource's actual configuration matches its expected configuration.

" + "documentation":"

Status of the resource's actual configuration compared to its expected configuration

  • DELETED: The resource differs from its expected configuration in that it has been deleted.

  • MODIFIED: The resource differs from its expected configuration.

  • NOT_CHECKED: CloudFormation has not checked if the resource differs from its expected configuration.

    Any resources that do not currently support drift detection have a status of NOT_CHECKED. For more information, see Resources that Support Drift Detection.

  • IN_SYNC: The resource's actual configuration matches its expected configuration.

" }, "LastCheckTimestamp":{ "shape":"Timestamp", - "documentation":"

When AWS CloudFormation last checked if the resource had drifted from its expected configuration.

" + "documentation":"

When CloudFormation last checked if the resource had drifted from its expected configuration.

" } }, "documentation":"

Contains information about whether the resource's actual configuration differs, or has drifted, from its expected configuration.

" @@ -5066,11 +5144,11 @@ "members":{ "StackResourceDriftStatus":{ "shape":"StackResourceDriftStatus", - "documentation":"

Status of the resource's actual configuration compared to its expected configuration

  • DELETED: The resource differs from its expected configuration in that it has been deleted.

  • MODIFIED: The resource differs from its expected configuration.

  • NOT_CHECKED: AWS CloudFormation has not checked if the resource differs from its expected configuration.

    Any resources that do not currently support drift detection have a status of NOT_CHECKED. For more information, see Resources that Support Drift Detection. If you performed an ContinueUpdateRollback operation on a stack, any resources included in ResourcesToSkip will also have a status of NOT_CHECKED. For more information on skipping resources during rollback operations, see Continue Rolling Back an Update in the AWS CloudFormation User Guide.

  • IN_SYNC: The resource's actual configuration matches its expected configuration.

" + "documentation":"

Status of the resource's actual configuration compared to its expected configuration

  • DELETED: The resource differs from its expected configuration in that it has been deleted.

  • MODIFIED: The resource differs from its expected configuration.

  • NOT_CHECKED: CloudFormation has not checked if the resource differs from its expected configuration.

    Any resources that do not currently support drift detection have a status of NOT_CHECKED. For more information, see Resources that Support Drift Detection. If you performed an ContinueUpdateRollback operation on a stack, any resources included in ResourcesToSkip will also have a status of NOT_CHECKED. For more information on skipping resources during rollback operations, see Continue Rolling Back an Update in the CloudFormation User Guide.

  • IN_SYNC: The resource's actual configuration matches its expected configuration.

" }, "LastCheckTimestamp":{ "shape":"Timestamp", - "documentation":"

When AWS CloudFormation last checked if the resource had drifted from its expected configuration.

" + "documentation":"

When CloudFormation last checked if the resource had drifted from its expected configuration.

" } }, "documentation":"

Summarizes information about whether the resource's actual configuration differs, or has drifted, from its expected configuration.

" @@ -5117,7 +5195,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

" + "documentation":"

Type of resource. (For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.)

" }, "LastUpdatedTimestamp":{ "shape":"Timestamp", @@ -5175,7 +5253,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account—for example, by creating new AWS Identity and Access Management (IAM) users. For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

" + "documentation":"

The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your account—for example, by creating new Identity and Access Management (IAM) users. For more information, see Acknowledging IAM Resources in CloudFormation Templates.

" }, "Tags":{ "shape":"Tags", @@ -5187,7 +5265,7 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Number (ARN) of the IAM role used to create or update the stack set.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the AWS CloudFormation User Guide.

" + "documentation":"

The Amazon Resource Number (ARN) of the IAM role used to create or update the stack set.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", @@ -5199,18 +5277,18 @@ }, "AutoDeployment":{ "shape":"AutoDeployment", - "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

" + "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organization or organizational unit (OU).

" }, "PermissionModel":{ "shape":"PermissionModels", - "documentation":"

Describes how the IAM roles required for stack set operations are created.

" + "documentation":"

Describes how the IAM roles required for stack set operations are created.

" }, "OrganizationalUnitIds":{ "shape":"OrganizationalUnitIdList", "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, - "documentation":"

A structure that contains information about a stack set. A stack set enables you to provision stacks into AWS accounts and across Regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

" + "documentation":"

A structure that contains information about a stack set. A stack set enables you to provision stacks into accounts and across Regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

" }, "StackSetARN":{"type":"string"}, "StackSetDriftDetectionDetails":{ @@ -5218,7 +5296,7 @@ "members":{ "DriftStatus":{ "shape":"StackSetDriftStatus", - "documentation":"

Status of the stack set's actual configuration compared to its expected template and parameter configuration. A stack set is considered to have drifted if one or more of its stack instances have drifted from their expected template and parameter configuration.

  • DRIFTED: One or more of the stack instances belonging to the stack set stack differs from the expected template and parameter configuration. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.

  • NOT_CHECKED: AWS CloudFormation has not checked the stack set for drift.

  • IN_SYNC: All of the stack instances belonging to the stack set stack match from the expected template and parameter configuration.

" + "documentation":"

Status of the stack set's actual configuration compared to its expected template and parameter configuration. A stack set is considered to have drifted if one or more of its stack instances have drifted from their expected template and parameter configuration.

  • DRIFTED: One or more of the stack instances belonging to the stack set stack differs from the expected template and parameter configuration. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.

  • NOT_CHECKED: CloudFormation has not checked the stack set for drift.

  • IN_SYNC: All of the stack instances belonging to the stack set stack match from the expected template and parameter configuration.

" }, "DriftDetectionStatus":{ "shape":"StackSetDriftDetectionStatus", @@ -5249,7 +5327,7 @@ "documentation":"

The number of stack instances for which the drift detection operation failed.

" } }, - "documentation":"

Detailed information about the drift status of the stack set.

For stack sets, contains information about the last completed drift operation performed on the stack set. Information about drift operations in-progress is not included.

For stack set operations, includes information about drift operations currently being performed on the stack set.

For more information, see Detecting Unmanaged Changes in Stack Sets in the AWS CloudFormation User Guide.

" + "documentation":"

Detailed information about the drift status of the stack set.

For stack sets, contains information about the last completed drift operation performed on the stack set. Information about drift operations in-progress is not included.

For stack set operations, includes information about drift operations currently being performed on the stack set.

For more information, see Detecting Unmanaged Changes in Stack Sets in the CloudFormation User Guide.

" }, "StackSetDriftDetectionStatus":{ "type":"string", @@ -5316,11 +5394,11 @@ }, "Status":{ "shape":"StackSetOperationStatus", - "documentation":"

The status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and AWS CloudFormation cancels the operation in any remaining Regions.

  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the AWS CloudFormation User Guide.

  • RUNNING: The operation is currently being performed.

  • STOPPED: The user has cancelled the operation.

  • STOPPING: The operation is in the process of stopping, at user request.

  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.

" + "documentation":"

The status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and CloudFormation cancels the operation in any remaining Regions.

  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the CloudFormation User Guide.

  • RUNNING: The operation is currently being performed.

  • STOPPED: The user has cancelled the operation.

  • STOPPING: The operation is in the process of stopping, at user request.

  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", - "documentation":"

The preferences for how AWS CloudFormation performs this stack set operation.

" + "documentation":"

The preferences for how CloudFormation performs this stack set operation.

" }, "RetainStacks":{ "shape":"RetainStacksNullable", @@ -5328,7 +5406,7 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Number (ARN) of the IAM role used to perform this stack set operation.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the AWS CloudFormation User Guide.

" + "documentation":"

The Amazon Resource Number (ARN) of the IAM role used to perform this stack set operation.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the CloudFormation User Guide.

" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", @@ -5336,7 +5414,7 @@ }, "CreationTimestamp":{ "shape":"Timestamp", - "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested Regions, before actually creating the first stacks.

" + "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested Regions, before actually creating the first stacks.

" }, "EndTimestamp":{ "shape":"Timestamp", @@ -5344,11 +5422,11 @@ }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts affected by the stack operation.

" + "documentation":"

[Service-managed permissions] The Organizations accounts affected by the stack operation.

" }, "StackSetDriftDetectionDetails":{ "shape":"StackSetDriftDetectionDetails", - "documentation":"

Detailed information about the drift status of the stack set. This includes information about drift operations currently being performed on the stack set.

this information will only be present for stack set operations whose Action type is DETECT_DRIFT.

For more information, see Detecting Unmanaged Changes in Stack Sets in the AWS CloudFormation User Guide.

" + "documentation":"

Detailed information about the drift status of the stack set. This includes information about drift operations currently being performed on the stack set.

This information will only be present for stack set operations whose Action type is DETECT_DRIFT.

For more information, see Detecting Unmanaged Changes in Stack Sets in the CloudFormation User Guide.

" } }, "documentation":"

The structure that contains information about a stack set operation.

" @@ -5367,7 +5445,7 @@ "members":{ "RegionConcurrencyType":{ "shape":"RegionConcurrencyType", - "documentation":"

The concurrency type of deploying StackSets operations in regions, could be in parallel or one region at a time.

" + "documentation":"

The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time.

" }, "RegionOrder":{ "shape":"RegionList", @@ -5375,11 +5453,11 @@ }, "FailureToleranceCount":{ "shape":"FailureToleranceCount", - "documentation":"

The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).

By default, 0 is specified.

" + "documentation":"

The number of accounts, per Region, for which this operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).

By default, 0 is specified.

" }, "FailureTolerancePercentage":{ "shape":"FailureTolerancePercentage", - "documentation":"

The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.

When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

By default, 0 is specified.

" + "documentation":"

The percentage of accounts, per Region, for which this stack operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.

When calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

By default, 0 is specified.

" }, "MaxConcurrentCount":{ "shape":"MaxConcurrentCount", @@ -5387,10 +5465,10 @@ }, "MaxConcurrentPercentage":{ "shape":"MaxConcurrentPercentage", - "documentation":"

The maximum percentage of accounts in which to perform this operation at one time.

When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result is zero. In this case, CloudFormation sets the number as one instead.

Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

By default, 1 is specified.

" + "documentation":"

The maximum percentage of accounts in which to perform this operation at one time.

When calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.

Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

By default, 1 is specified.

" } }, - "documentation":"

The user-specified preferences for how AWS CloudFormation performs a stack set operation.

For more information on maximum concurrent accounts and failure tolerance, see Stack set operation options.

" + "documentation":"

The user-specified preferences for how CloudFormation performs a stack set operation.

For more information on maximum concurrent accounts and failure tolerance, see Stack set operation options.

" }, "StackSetOperationResultStatus":{ "type":"string", @@ -5411,11 +5489,11 @@ "members":{ "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account for this operation result.

" + "documentation":"

[Self-managed permissions] The name of the account for this operation result.

" }, "Region":{ "shape":"Region", - "documentation":"

The name of the AWS Region for this operation result.

" + "documentation":"

The name of the Region for this operation result.

" }, "Status":{ "shape":"StackSetOperationResultStatus", @@ -5427,7 +5505,7 @@ }, "AccountGateResult":{ "shape":"AccountGateResult", - "documentation":"

The results of the account gate function AWS CloudFormation invokes, if present, before proceeding with stack set operations in an account

" + "documentation":"

The results of the account gate function CloudFormation invokes, if present, before proceeding with stack set operations in an account.

" }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", @@ -5464,11 +5542,11 @@ }, "Status":{ "shape":"StackSetOperationStatus", - "documentation":"

The overall status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and AWS CloudFormation cancels the operation in any remaining Regions.

  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the AWS CloudFormation User Guide.

  • RUNNING: The operation is currently being performed.

  • STOPPED: The user has cancelled the operation.

  • STOPPING: The operation is in the process of stopping, at user request.

  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.

" + "documentation":"

The overall status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and CloudFormation cancels the operation in any remaining Regions.

  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the CloudFormation User Guide.

  • RUNNING: The operation is currently being performed.

  • STOPPED: The user has cancelled the operation.

  • STOPPING: The operation is in the process of stopping, at user request.

  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.

" }, "CreationTimestamp":{ "shape":"Timestamp", - "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested Regions, before actually creating the first stacks.

" + "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested Regions, before actually creating the first stacks.

" }, "EndTimestamp":{ "shape":"Timestamp", @@ -5509,15 +5587,15 @@ }, "AutoDeployment":{ "shape":"AutoDeployment", - "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organizational unit (OU).

" + "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organizational unit (OU).

" }, "PermissionModel":{ "shape":"PermissionModels", - "documentation":"

Describes how the IAM roles required for stack set operations are created.

" + "documentation":"

Describes how the IAM roles required for stack set operations are created.

" }, "DriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

Status of the stack set's actual configuration compared to its expected template and parameter configuration. A stack set is considered to have drifted if one or more of its stack instances have drifted from their expected template and parameter configuration.

  • DRIFTED: One or more of the stack instances belonging to the stack set stack differs from the expected template and parameter configuration. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.

  • NOT_CHECKED: AWS CloudFormation has not checked the stack set for drift.

  • IN_SYNC: All of the stack instances belonging to the stack set stack match from the expected template and parameter configuration.

  • UNKNOWN: This value is reserved for future use.

" + "documentation":"

Status of the stack set's actual configuration compared to its expected template and parameter configuration. A stack set is considered to have drifted if one or more of its stack instances have drifted from their expected template and parameter configuration.

  • DRIFTED: One or more of the stack instances belonging to the stack set stack differs from the expected template and parameter configuration. A stack instance is considered to have drifted if one or more of the resources in the associated stack have drifted.

  • NOT_CHECKED: CloudFormation has not checked the stack set for drift.

  • IN_SYNC: All of the stack instances belonging to the stack set stack match from the expected template and parameter configuration.

  • UNKNOWN: This value is reserved for future use.

" }, "LastDriftCheckTimestamp":{ "shape":"Timestamp", @@ -5604,11 +5682,11 @@ }, "ParentId":{ "shape":"StackId", - "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the AWS CloudFormation User Guide.

" + "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" }, "RootId":{ "shape":"StackId", - "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the AWS CloudFormation User Guide.

" + "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" }, "DriftInformation":{ "shape":"StackDriftInformationSummary", @@ -5658,7 +5736,7 @@ }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -5685,14 +5763,14 @@ "members":{ "Key":{ "shape":"TagKey", - "documentation":"

Required. A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws:.

" + "documentation":"

Required. A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services have the reserved prefix: aws:.

" }, "Value":{ "shape":"TagValue", "documentation":"

Required. A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.

" } }, - "documentation":"

The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.

" + "documentation":"

The Tag type enables you to specify a key-value pair that can be used to store information about a CloudFormation stack.

" }, "TagKey":{ "type":"string", @@ -5773,7 +5851,7 @@ }, "LogDeliveryBucket":{ "shape":"S3Bucket", - "documentation":"

The S3 bucket to which CloudFormation delivers the contract test execution logs.

CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of PASSED or FAILED.

The user calling TestType must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:

  • GetObject

  • PutObject

For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the AWS Identity and Access Management User Guide.

" + "documentation":"

The S3 bucket to which CloudFormation delivers the contract test execution logs.

CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of PASSED or FAILED.

The user calling TestType must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:

  • GetObject

  • PutObject

For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Amazon Web Services Identity and Access Management User Guide.

" } } }, @@ -6115,11 +6193,11 @@ }, "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

" + "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the CloudFormation User Guide.)

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

" }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

" + "documentation":"

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

" }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -6139,19 +6217,19 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to update the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

    If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

    You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified.

    For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

    You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

    For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", - "documentation":"

The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.

If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.

" + "documentation":"

The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.

If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management.

" }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to update the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.

" }, "RollbackConfiguration":{ "shape":"RollbackConfiguration", - "documentation":"

The rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" + "documentation":"

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" }, "StackPolicyBody":{ "shape":"StackPolicyBody", @@ -6163,15 +6241,15 @@ }, "NotificationARNs":{ "shape":"NotificationARNs", - "documentation":"

Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that AWS CloudFormation associates with the stack. Specify an empty list to remove all notification topics.

" + "documentation":"

Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that CloudFormation associates with the stack. Specify an empty list to remove all notification topics.

" }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.

If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.

" + "documentation":"

Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.

If you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify an empty value, CloudFormation removes all associated tags.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + "documentation":"

A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "documentation":"

The input for an UpdateStack action.

" @@ -6189,11 +6267,11 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of one or more AWS accounts for which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and Regions.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of one or more accounts for which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and Regions.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts for which you want to update parameter values for stack instances. If your update targets OUs, the overridden parameter values only apply to the accounts that are currently in the target OUs and their child OUs. Accounts added to the target OUs and their child OUs in the future won't use the overridden values.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Service-managed permissions] The Organizations accounts for which you want to update parameter values for stack instances. If your update targets OUs, the overridden parameter values only apply to the accounts that are currently in the target OUs and their child OUs. Accounts added to the target OUs and their child OUs in the future won't use the overridden values.

You can specify Accounts or DeploymentTargets, but not both.

" }, "Regions":{ "shape":"RegionList", @@ -6201,20 +6279,20 @@ }, "ParameterOverrides":{ "shape":"Parameters", - "documentation":"

A list of input parameters whose values you want to update for the specified stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Regions. When specifying parameters and their values, be aware of how AWS CloudFormation sets parameter values during stack instance update operations:

  • To override the current value for a parameter, include the parameter and specify its value.

  • To leave a parameter set to its present value, you can do one of the following:

    • Do not include the parameter in the list.

    • Include the parameter and specify UsePreviousValue as true. (You cannot specify both a value and set UsePreviousValue to true.)

  • To set all overridden parameter back to the values specified in the stack set, specify a parameter list but do not include any parameters.

  • To leave all parameters set to their present values, do not specify this property at all.

During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

" + "documentation":"

A list of input parameters whose values you want to update for the specified stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance update operations:

  • To override the current value for a parameter, include the parameter and specify its value.

  • To leave an overridden parameter set to its present value, include the parameter and specify UsePreviousValue as true. (You cannot specify both a value and set UsePreviousValue to true.)

  • To set an overridden parameter back to the value specified in the stack set, specify a parameter list but do not include the parameter in the list.

  • To leave all parameters set to their present values, do not specify this property at all.

During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", - "documentation":"

Preferences for how AWS CloudFormation performs this stack set operation.

" + "documentation":"

Preferences for how CloudFormation performs this stack set operation.

" }, "OperationId":{ "shape":"ClientRequestToken", - "documentation":"

The unique identifier for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

", + "documentation":"

The unique identifier for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

", "idempotencyToken":true }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -6251,11 +6329,11 @@ }, "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" + "documentation":"

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" + "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -6267,52 +6345,52 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to update the stack set and its associated stack instances.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

    Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

    Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

" }, "Tags":{ "shape":"Tags", - "documentation":"

The key-value pairs to associate with this stack set and the stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the stacks. You can specify a maximum number of 50 tags.

If you specify tags for this parameter, those tags replace any list of tags that are currently associated with this stack set. This means:

  • If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags.

  • If you specify any tags using this parameter, you must specify all the tags that you want associated with this stack set, even tags you've specified before (for example, when creating the stack set or during a previous update of the stack set). Any tags that you don't include in the updated list of tags are removed from the stack set, and therefore from the stacks and resources as well.

  • If you specify an empty value, AWS CloudFormation removes all currently associated tags.

If you specify new tags as part of an UpdateStackSet action, AWS CloudFormation checks to see if you have the required IAM permission to tag resources. If you omit tags that are currently associated with the stack set from the list of tags you specify, AWS CloudFormation assumes that you want to remove those tags from the stack set, and checks to see if you have permission to untag resources. If you don't have the necessary permission(s), the entire UpdateStackSet action fails with an access denied error, and the stack set is not updated.

" + "documentation":"

The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates these tags to supported resources that are created in the stacks. You can specify a maximum number of 50 tags.

If you specify tags for this parameter, those tags replace any list of tags that are currently associated with this stack set. This means:

  • If you don't specify this parameter, CloudFormation doesn't modify the stack's tags.

  • If you specify any tags using this parameter, you must specify all the tags that you want associated with this stack set, even tags you've specified before (for example, when creating the stack set or during a previous update of the stack set). Any tags that you don't include in the updated list of tags are removed from the stack set, and therefore from the stacks and resources as well.

  • If you specify an empty value, CloudFormation removes all currently associated tags.

If you specify new tags as part of an UpdateStackSet action, CloudFormation checks to see if you have the required IAM permission to tag resources. If you omit tags that are currently associated with the stack set from the list of tags you specify, CloudFormation assumes that you want to remove those tags from the stack set, and checks to see if you have permission to untag resources. If you don't have the necessary permission(s), the entire UpdateStackSet action fails with an access denied error, and the stack set is not updated.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", - "documentation":"

Preferences for how AWS CloudFormation performs this stack set operation.

" + "documentation":"

Preferences for how CloudFormation performs this stack set operation.

" }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Number (ARN) of the IAM role to use to update this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the AWS CloudFormation User Guide.

If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.

" + "documentation":"

The Amazon Resource Number (ARN) of the IAM role to use to update this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.

" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", - "documentation":"

The name of the IAM execution role to use to update the stack set. If you do not specify an execution role, AWS CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.

Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets.

If you specify a customized execution role, AWS CloudFormation uses that role to update the stack. If you do not specify a customized execution role, AWS CloudFormation performs the update using the role previously associated with the stack set, so long as you have permissions to perform operations on the stack set.

" + "documentation":"

The name of the IAM execution role to use to update the stack set. If you do not specify an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.

Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets.

If you specify a customized execution role, CloudFormation uses that role to update the stack. If you do not specify a customized execution role, CloudFormation performs the update using the role previously associated with the stack set, so long as you have permissions to perform operations on the stack set.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts in which to update associated stack instances.

To update all the stack instances associated with this stack set, do not specify DeploymentTargets or Regions.

If the stack set update includes changes to the template (that is, if TemplateBody or TemplateURL is specified), or the Parameters, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" + "documentation":"

[Service-managed permissions] The Organizations accounts in which to update associated stack instances.

To update all the stack instances associated with this stack set, do not specify DeploymentTargets or Regions.

If the stack set update includes changes to the template (that is, if TemplateBody or TemplateURL is specified), or the Parameters, CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" }, "PermissionModel":{ "shape":"PermissionModels", - "documentation":"

Describes how the IAM roles required for stack set operations are created. You cannot modify PermissionModel if there are stack instances associated with your stack set.

" + "documentation":"

Describes how the IAM roles required for stack set operations are created. You cannot modify PermissionModel if there are stack instances associated with your stack set.

" }, "AutoDeployment":{ "shape":"AutoDeployment", - "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

If you specify AutoDeployment, do not specify DeploymentTargets or Regions.

" + "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organization or organizational unit (OU).

If you specify AutoDeployment, do not specify DeploymentTargets or Regions.

" }, "OperationId":{ "shape":"ClientRequestToken", - "documentation":"

The unique ID for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, AWS CloudFormation generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", + "documentation":"

The unique ID for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that CloudFormation successfully received them.

If you don't specify an operation ID, CloudFormation generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", "idempotencyToken":true }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The accounts in which to update associated stack instances. If you specify accounts, you must also specify the Regions in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" + "documentation":"

[Self-managed permissions] The accounts in which to update associated stack instances. If you specify accounts, you must also specify the Regions in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" }, "Regions":{ "shape":"RegionList", - "documentation":"

The Regions in which to update associated stack instances. If you specify Regions, you must also specify accounts in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" + "documentation":"

The Regions in which to update associated stack instances. If you specify Regions, you must also specify accounts in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" }, "CallAs":{ "shape":"CallAs", - "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your AWS account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the AWS CloudFormation User Guide.

" + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" } } }, @@ -6359,11 +6437,11 @@ "members":{ "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" + "documentation":"

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" + "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" } }, "documentation":"

The input for ValidateTemplate action.

" @@ -6381,7 +6459,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

" + "documentation":"

The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in CloudFormation Templates.

" }, "CapabilitiesReason":{ "shape":"CapabilitiesReason", @@ -6411,5 +6489,5 @@ ] } }, - "documentation":"AWS CloudFormation

AWS CloudFormation allows you to create and manage AWS infrastructure deployments predictably and repeatedly. You can use AWS CloudFormation to leverage AWS products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly-reliable, highly scalable, cost-effective applications without creating or configuring the underlying AWS infrastructure.

With AWS CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. AWS CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

For more information about AWS CloudFormation, see the AWS CloudFormation Product Page.

Amazon CloudFormation makes use of other AWS products. If you need additional technical information about a specific AWS product, you can find the product's technical documentation at docs.aws.amazon.com.

" + "documentation":"AWS CloudFormation

CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly-reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure.

With CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

For more information about CloudFormation, see the CloudFormation Product Page.

CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com.

" } diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index b0d3c35cf56a..853ffa7c9e07 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index 7d2190a98fc8..ea8e62409dfd 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 266cf045e2b2..a73989b81d42 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 4323515852a3..99faf241837e 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index 43fc4981a583..2a352324b07c 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index ee62eb81cd05..fd0d86198e0e 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudwatch/pom.xml 
b/services/cloudwatch/pom.xml index b87211117ec0..207a0b9cac23 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json index 8e50bd8dbb82..5fa8a4f5988c 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json @@ -297,7 +297,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

  • The SampleCount value of the statistic set is 1.

  • The Min and the Max values of the statistic set are equal.

Percentile statistics are not available for metrics when any of the metric values are negative numbers.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" + "documentation":"

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

  • The SampleCount value of the statistic set is 1.

  • The Min and the Max values of the statistic set are equal.

Percentile statistics are not available for metrics when any of the metric values are negative numbers.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by Amazon Web Services services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" }, "GetMetricStream":{ "name":"GetMetricStream", @@ -479,7 +479,7 @@ "errors":[ {"shape":"LimitExceededFault"} ], - "documentation":"

Creates or updates an alarm and associates it with the specified metric, metric math expression, or anomaly detection model.

Alarms based on anomaly detection models cannot have Auto Scaling actions.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

  • The iam:CreateServiceLinkedRole for all alarms with EC2 actions

  • The iam:CreateServiceLinkedRole to create an alarm with Systems Manager OpsItem actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked roles are called AWSServiceRoleForCloudWatchEvents and AWSServiceRoleForCloudWatchAlarms_ActionSSM. For more information, see AWS service-linked role.

" + "documentation":"

Creates or updates an alarm and associates it with the specified metric, metric math expression, or anomaly detection model.

Alarms based on anomaly detection models cannot have Auto Scaling actions.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

  • The iam:CreateServiceLinkedRole for all alarms with EC2 actions

  • The iam:CreateServiceLinkedRole to create an alarm with Systems Manager OpsItem actions.

The first time you create an alarm in the Amazon Web Services Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked roles are called AWSServiceRoleForCloudWatchEvents and AWSServiceRoleForCloudWatchAlarms_ActionSSM. For more information, see Amazon Web Services service-linked role.

Cross-account alarms

You can set an alarm on metrics in the current account, or in another account. To create a cross-account alarm that watches a metric in a different account, you must have completed the following pre-requisites:

  • The account where the metrics are located (the sharing account) must already have a sharing role named CloudWatch-CrossAccountSharingRole. If it does not already have this role, you must create it using the instructions in Set up a sharing account in Cross-account cross-Region CloudWatch console. The policy for that role must grant access to the ID of the account where you are creating the alarm.

  • The account where you are creating the alarm (the monitoring account) must already have a service-linked role named AWSServiceRoleForCloudWatchCrossAccount to allow CloudWatch to assume the sharing role in the sharing account. If it does not, you must create it following the directions in Set up a monitoring account in Cross-account cross-Region CloudWatch console.

" }, "PutMetricData":{ "name":"PutMetricData", @@ -514,7 +514,7 @@ {"shape":"MissingRequiredParameterException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates or updates a metric stream. Metric streams can automatically stream CloudWatch metrics to AWS destinations including Amazon S3 and to many third-party solutions.

For more information, see Using Metric Streams.

To create a metric stream, you must be logged on to an account that has the iam:PassRole permission and either the CloudWatchFullAccess policy or the cloudwatch:PutMetricStream permission.

When you create or update a metric stream, you choose one of the following:

  • Stream metrics from all metric namespaces in the account.

  • Stream metrics from all metric namespaces in the account, except for the namespaces that you list in ExcludeFilters.

  • Stream metrics from only the metric namespaces that you list in IncludeFilters.

When you use PutMetricStream to create a new metric stream, the stream is created in the running state. If you use it to update an existing stream, the state of the stream is not changed.

" + "documentation":"

Creates or updates a metric stream. Metric streams can automatically stream CloudWatch metrics to Amazon Web Services destinations including Amazon S3 and to many third-party solutions.

For more information, see Using Metric Streams.

To create a metric stream, you must be logged on to an account that has the iam:PassRole permission and either the CloudWatchFullAccess policy or the cloudwatch:PutMetricStream permission.

When you create or update a metric stream, you choose one of the following:

  • Stream metrics from all metric namespaces in the account.

  • Stream metrics from all metric namespaces in the account, except for the namespaces that you list in ExcludeFilters.

  • Stream metrics from only the metric namespaces that you list in IncludeFilters.

When you use PutMetricStream to create a new metric stream, the stream is created in the running state. If you use it to update an existing stream, the state of the stream is not changed.

" }, "SetAlarmState":{ "name":"SetAlarmState", @@ -582,7 +582,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. Currently, the only CloudWatch resources that can be tagged are alarms and Contributor Insights rules.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a CloudWatch resource.

" + "documentation":"

Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. Currently, the only CloudWatch resources that can be tagged are alarms and Contributor Insights rules.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a CloudWatch resource.

" }, "UntagResource":{ "name":"UntagResource", @@ -605,6 +605,11 @@ } }, "shapes":{ + "AccountId":{ + "type":"string", + "max":255, + "min":1 + }, "ActionPrefix":{ "type":"string", "max":1024, @@ -1334,11 +1339,11 @@ "members":{ "Name":{ "shape":"DimensionName", - "documentation":"

The name of the dimension. Dimension names cannot contain blank spaces or non-ASCII characters.

" + "documentation":"

The name of the dimension. Dimension names must contain only ASCII characters and must include at least one non-whitespace character.

" }, "Value":{ "shape":"DimensionValue", - "documentation":"

The value of the dimension. Dimension values cannot contain blank spaces or non-ASCII characters.

" + "documentation":"

The value of the dimension. Dimension values must contain only ASCII characters and must include at least one non-whitespace character.

" } }, "documentation":"

A dimension is a name/value pair that is part of the identity of a metric. You can assign up to 10 dimensions to a metric. Because dimensions are part of the unique identifier for a metric, whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric.

", @@ -1747,7 +1752,7 @@ }, "OutputFormat":{ "shape":"OutputFormat", - "documentation":"

The format of the resulting image. Only PNG images are supported.

The default is png. If you specify png, the API returns an HTTP response with the content-type set to text/xml. The image data is in a MetricWidgetImage field. For example:

<GetMetricWidgetImageResponse xmlns=<URLstring>>

<GetMetricWidgetImageResult>

<MetricWidgetImage>

iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...

</MetricWidgetImage>

</GetMetricWidgetImageResult>

<ResponseMetadata>

<RequestId>6f0d4192-4d42-11e8-82c1-f539a07e0e3b</RequestId>

</ResponseMetadata>

</GetMetricWidgetImageResponse>

The image/png setting is intended only for custom HTTP requests. For most use cases, and all actions using an AWS SDK, you should use png. If you specify image/png, the HTTP response has a content-type set to image/png, and the body of the response is a PNG image.

" + "documentation":"

The format of the resulting image. Only PNG images are supported.

The default is png. If you specify png, the API returns an HTTP response with the content-type set to text/xml. The image data is in a MetricWidgetImage field. For example:

<GetMetricWidgetImageResponse xmlns=<URLstring>>

<GetMetricWidgetImageResult>

<MetricWidgetImage>

iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...

</MetricWidgetImage>

</GetMetricWidgetImageResult>

<ResponseMetadata>

<RequestId>6f0d4192-4d42-11e8-82c1-f539a07e0e3b</RequestId>

</ResponseMetadata>

</GetMetricWidgetImageResponse>

The image/png setting is intended only for custom HTTP requests. For most use cases, and all actions using an Amazon Web Services SDK, you should use png. If you specify image/png, the HTTP response has a content-type set to image/png, and the body of the response is a PNG image.

" } } }, @@ -2221,7 +2226,7 @@ "documentation":"

The message text.

" } }, - "documentation":"

A message returned by the GetMetricDataAPI, including a code and a description.

" + "documentation":"

A message returned by the GetMetricData API, including a code and a description.

If a cross-Region GetMetricData operation fails with a code of Forbidden and a value of Authentication too complex to retrieve cross region data, you can correct the problem by running the GetMetricData operation in the same Region where the metric data is.

" }, "MessageDataCode":{"type":"string"}, "MessageDataValue":{"type":"string"}, @@ -2430,6 +2435,10 @@ "Period":{ "shape":"Period", "documentation":"

The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData operation that includes a StorageResolution of 1 second.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The ID of the account where the metrics are located, if this is a cross-account alarm.

Use this field only for PutMetricAlarm operations. It is not used in GetMetricData operations.

" } }, "documentation":"

This structure is used in both GetMetricData and PutMetricAlarm. The supported use of this structure is different for those two operations.

When used in GetMetricData, it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a math expression on metric data. A single GetMetricData call can include up to 500 MetricDataQuery structures.

When used in PutMetricAlarm, it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Of those Expression structures, one must have True as the value for ReturnData. The result of this expression is the value the alarm watches.

Any expression used in a PutMetricAlarm operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.

" @@ -2844,7 +2853,7 @@ }, "AlarmActions":{ "shape":"ResourceList", - "documentation":"

The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:automate:region:ec2:reboot | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name | arn:aws:ssm:region:account-id:opsitem:severity

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" + "documentation":"

The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:automate:region:ec2:reboot | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name | arn:aws:ssm:region:account-id:opsitem:severity | arn:aws:ssm-incidents::account-id:response-plan:response-plan-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0

" }, "InsufficientDataActions":{ "shape":"ResourceList", @@ -2925,7 +2934,7 @@ "members":{ "Namespace":{ "shape":"Namespace", - "documentation":"

The namespace for the metric data.

To avoid conflicts with AWS service namespaces, you should not specify a namespace that begins with AWS/

" + "documentation":"

The namespace for the metric data.

To avoid conflicts with Amazon Web Services service namespaces, you should not specify a namespace that begins with AWS/

" }, "MetricData":{ "shape":"MetricData", @@ -2968,7 +2977,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of key-value pairs to associate with the metric stream. You can associate as many as 50 tags with a metric stream.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

" + "documentation":"

A list of key-value pairs to associate with the metric stream. You can associate as many as 50 tags with a metric stream.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

You can use this parameter only when you are creating a new metric stream. If you are using this operation to update an existing metric stream, any tags you specify in this parameter are ignored. To change the tags of an existing metric stream, use TagResource or UntagResource.

" } } }, @@ -3317,5 +3326,5 @@ "member":{"shape":"DatapointValue"} } }, - "documentation":"

Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the applications you run on AWS in real time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications.

CloudWatch alarms send notifications or automatically change the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon EC2 instances. Then, use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.

In addition to monitoring the built-in metrics that come with AWS, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health.

" + "documentation":"

Amazon CloudWatch monitors your Amazon Web Services resources and the applications you run on Amazon Web Services in real time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications.

CloudWatch alarms send notifications or automatically change the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon EC2 instances. Then, use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.

In addition to monitoring the built-in metrics that come with Amazon Web Services, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health.

" } diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index 86a3890f3b50..4bca16e10eca 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index 9f48d8b7b53c..caa16b064b71 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/cloudwatchlogs/src/it/java/software/amazon/awssdk/services/cloudwatchlogs/ServiceIntegrationTest.java b/services/cloudwatchlogs/src/it/java/software/amazon/awssdk/services/cloudwatchlogs/ServiceIntegrationTest.java index e63d1aefdf4f..673114705d5b 100644 --- a/services/cloudwatchlogs/src/it/java/software/amazon/awssdk/services/cloudwatchlogs/ServiceIntegrationTest.java +++ b/services/cloudwatchlogs/src/it/java/software/amazon/awssdk/services/cloudwatchlogs/ServiceIntegrationTest.java @@ -162,53 +162,6 @@ public void tearDown() { } } - /** - * Test uploading and retrieving log events. 
- */ - @Test - public void testEventsLogging() { - // No log event is expected in the newly created log stream - GetLogEventsResponse getResult = awsLogs.getLogEvents(GetLogEventsRequest.builder().logGroupName(logGroupName).logStreamName(logStreamName).build()); - Assert.assertTrue(getResult.events().isEmpty()); - - // Insert a new log event - PutLogEventsRequest request = PutLogEventsRequest.builder() - .logGroupName(logGroupName) - .logStreamName(logStreamName) - .logEvents(InputLogEvent.builder() - .message(LOG_MESSAGE) - .timestamp(LOG_MESSAGE_TIMESTAMP) - .build()) - .build(); - PutLogEventsResponse putResult = awsLogs.putLogEvents(request); - - Assert.assertNotNull(putResult.nextSequenceToken()); - - // The new log event is not instantly available in GetLogEvents operation. - try { - Thread.sleep(5000); - } catch (InterruptedException ignored) { - // Ignored or expected. - } - - // Pull the event from the log stream - getResult = awsLogs.getLogEvents(GetLogEventsRequest.builder().logGroupName(logGroupName).logStreamName(logStreamName).build()); - Assert.assertEquals(1, getResult.events().size()); - Assert.assertNotNull(getResult.nextBackwardToken()); - Assert.assertNotNull(getResult.nextForwardToken()); - - OutputLogEvent event = getResult.events().get(0); - Assert.assertEquals(LOG_MESSAGE, event.message()); - Assert.assertEquals(LOG_MESSAGE_TIMESTAMP, event.timestamp().longValue()); - - // Use DescribeLogStreams API to verify that the new log event has - // updated the following parameters of the log stream. - final LogStream stream = findLogStreamByName(awsLogs, logGroupName, logStreamName); - Assert.assertEquals(LOG_MESSAGE_TIMESTAMP, stream.firstEventTimestamp().longValue()); - Assert.assertEquals(LOG_MESSAGE_TIMESTAMP, stream.lastEventTimestamp().longValue()); - Assert.assertNotNull(stream.lastIngestionTime()); - } - /** * Use the TestMetricFilter API to verify the correctness of the metric filter pattern we have * been using in this integration test. 
diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index f6f9f864d813..6fc14138ff2f 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 4c82cb83ec68..a541944fd0e3 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codebuild/src/main/resources/codegen-resources/service-2.json b/services/codebuild/src/main/resources/codegen-resources/service-2.json index 036da44deac8..e94e574acd22 100644 --- a/services/codebuild/src/main/resources/codegen-resources/service-2.json +++ b/services/codebuild/src/main/resources/codegen-resources/service-2.json @@ -134,7 +134,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

For an existing CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an CodeBuild project, and the project is used as a build step in CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using CodePipeline, we recommend that you disable webhooks in CodeBuild. In the CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.

" + "documentation":"

For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.

" }, "DeleteBuildBatch":{ "name":"DeleteBuildBatch", @@ -228,7 +228,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OAuthProviderException"} ], - "documentation":"

For an existing CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops CodeBuild from rebuilding the source code every time a code change is pushed to the repository.

" + "documentation":"

For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.

" }, "DescribeCodeCoverages":{ "name":"DescribeCodeCoverages", @@ -298,7 +298,7 @@ {"shape":"AccountLimitExceededException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Imports the source repository credentials for an CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

" + "documentation":"

Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

" }, "InvalidateProjectCache":{ "name":"InvalidateProjectCache", @@ -376,7 +376,7 @@ }, "input":{"shape":"ListCuratedEnvironmentImagesInput"}, "output":{"shape":"ListCuratedEnvironmentImagesOutput"}, - "documentation":"

Gets information about Docker images that are managed by CodeBuild.

" + "documentation":"

Gets information about Docker images that are managed by AWS CodeBuild.

" }, "ListProjects":{ "name":"ListProjects", @@ -402,7 +402,7 @@ "errors":[ {"shape":"InvalidInputException"} ], - "documentation":"

Gets a list ARNs for the report groups in the current Amazon Web Services account.

" + "documentation":"

Gets a list of ARNs for the report groups in the current AWS account.

" }, "ListReports":{ "name":"ListReports", @@ -415,7 +415,7 @@ "errors":[ {"shape":"InvalidInputException"} ], - "documentation":"

Returns a list of ARNs for the reports in the current Amazon Web Services account.

" + "documentation":"

Returns a list of ARNs for the reports in the current AWS account.

" }, "ListReportsForReportGroup":{ "name":"ListReportsForReportGroup", @@ -442,7 +442,7 @@ "errors":[ {"shape":"InvalidInputException"} ], - "documentation":"

Gets a list of projects that are shared with other Amazon Web Services accounts or users.

" + "documentation":"

Gets a list of projects that are shared with other AWS accounts or users.

" }, "ListSharedReportGroups":{ "name":"ListSharedReportGroups", @@ -455,7 +455,7 @@ "errors":[ {"shape":"InvalidInputException"} ], - "documentation":"

Gets a list of report groups that are shared with other Amazon Web Services accounts or users.

" + "documentation":"

Gets a list of report groups that are shared with other AWS accounts or users.

" }, "ListSourceCredentials":{ "name":"ListSourceCredentials", @@ -611,7 +611,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OAuthProviderException"} ], - "documentation":"

Updates the webhook associated with an CodeBuild build project.

If you use Bitbucket for your repository, rotateSecret is ignored.

" + "documentation":"

Updates the webhook associated with an AWS CodeBuild build project.

If you use Bitbucket for your repository, rotateSecret is ignored.

" } }, "shapes":{ @@ -619,7 +619,7 @@ "type":"structure", "members":{ }, - "documentation":"

An Amazon Web Services service limit was exceeded for the calling Amazon Web Services account.

", + "documentation":"

An AWS service limit was exceeded for the calling AWS account.

", "exception":true }, "ArtifactNamespace":{ @@ -727,7 +727,7 @@ "members":{ "names":{ "shape":"ProjectNames", - "documentation":"

The names or ARNs of the build projects. To get information about a project shared with your Amazon Web Services account, its ARN must be specified. You cannot specify a shared project using its name.

" + "documentation":"

The names or ARNs of the build projects. To get information about a project shared with your AWS account, its ARN must be specified. You cannot specify a shared project using its name.

" } } }, @@ -799,12 +799,21 @@ }, "computeTypesAllowed":{ "shape":"ComputeTypesAllowed", - "documentation":"

An array of strings that specify the compute types that are allowed for the batch build. See Build environment compute types in the CodeBuild User Guide for these values.

" + "documentation":"

An array of strings that specify the compute types that are allowed for the batch build. See Build environment compute types in the AWS CodeBuild User Guide for these values.

" } }, "documentation":"

Specifies restrictions for the batch build.

" }, "Boolean":{"type":"boolean"}, + "BucketOwnerAccess":{ + "type":"string", + "documentation":"

Specifies the access for objects that are uploaded to an Amazon S3 bucket that is owned by another account.

By default, only the account that uploads the objects to the bucket has access to these objects. This property allows you to give the bucket owner access to these objects.

NONE

The bucket owner does not have access to the objects. This is the default.

READ_ONLY

The bucket owner has read only access to the objects. The uploading account retains ownership of the objects.

FULL

The bucket owner has full access to the objects. Object ownership is determined by the following criteria:

  • If the bucket is configured with the Bucket owner preferred setting, the bucket owner owns the objects. The uploading account will have object access as specified by the bucket's policy.

  • Otherwise, the uploading account retains ownership of the objects.

For more information about Amazon S3 object ownership, see Controlling ownership of uploaded objects using S3 Object Ownership in the Amazon Simple Storage Service User Guide.

", + "enum":[ + "NONE", + "READ_ONLY", + "FULL" + ] + }, "Build":{ "type":"structure", "members":{ @@ -838,15 +847,15 @@ }, "sourceVersion":{ "shape":"NonEmptyString", - "documentation":"

Any version identifier for the version of the source code to be built. If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

Any version identifier for the version of the source code to be built. If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "resolvedSourceVersion":{ "shape":"NonEmptyString", - "documentation":"

An identifier for the version of this build's source code.

  • For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.

  • For CodePipeline, the source revision provided by CodePipeline.

  • For Amazon S3, this does not apply.

" + "documentation":"

An identifier for the version of this build's source code.

  • For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.

  • For AWS CodePipeline, the source revision provided by AWS CodePipeline.

  • For Amazon S3, this does not apply.

" }, "projectName":{ "shape":"NonEmptyString", - "documentation":"

The name of the CodeBuild project.

" + "documentation":"

The name of the AWS CodeBuild project.

" }, "phases":{ "shape":"BuildPhases", @@ -862,7 +871,7 @@ }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", - "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

" + "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

  • For AWS CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

" }, "artifacts":{ "shape":"BuildArtifacts", @@ -886,11 +895,11 @@ }, "logs":{ "shape":"LogsLocation", - "documentation":"

Information about the build's logs in CloudWatch Logs.

" + "documentation":"

Information about the build's logs in Amazon CloudWatch Logs.

" }, "timeoutInMinutes":{ "shape":"WrapperInt", - "documentation":"

How long, in minutes, for CodeBuild to wait before timing out this build if it does not get marked as completed.

" + "documentation":"

How long, in minutes, for AWS CodeBuild to wait before timing out this build if it does not get marked as completed.

" }, "queuedTimeoutInMinutes":{ "shape":"WrapperInt", @@ -902,11 +911,11 @@ }, "initiator":{ "shape":"String", - "documentation":"

The entity that started the build. Valid values include:

  • If CodePipeline started the build, the pipeline's name (for example, codepipeline/my-demo-pipeline).

  • If an Identity and Access Management user started the build, the user's name (for example, MyUserName).

  • If the Jenkins plugin for CodeBuild started the build, the string CodeBuild-Jenkins-Plugin.

" + "documentation":"

The entity that started the build. Valid values include:

  • If AWS CodePipeline started the build, the pipeline's name (for example, codepipeline/my-demo-pipeline).

  • If an AWS Identity and Access Management (IAM) user started the build, the user's name (for example, MyUserName).

  • If the Jenkins plugin for AWS CodeBuild started the build, the string CodeBuild-Jenkins-Plugin.

" }, "vpcConfig":{ "shape":"VpcConfig", - "documentation":"

If your CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.

" + "documentation":"

If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.

" }, "networkInterface":{ "shape":"NetworkInterface", @@ -914,11 +923,11 @@ }, "encryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "exportedEnvironmentVariables":{ "shape":"ExportedEnvironmentVariables", - "documentation":"

A list of exported environment variables for this build.

Exported environment variables are used in conjunction with CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the CodePipeline User Guide.

" + "documentation":"

A list of exported environment variables for this build.

Exported environment variables are used in conjunction with AWS CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the AWS CodePipeline User Guide.

" }, "reportArns":{ "shape":"BuildReportArns", @@ -965,7 +974,8 @@ "artifactIdentifier":{ "shape":"String", "documentation":"

An identifier for this artifact definition.

" - } + }, + "bucketOwnerAccess":{"shape":"BucketOwnerAccess"} }, "documentation":"

Information about build output artifacts.

" }, @@ -1008,7 +1018,7 @@ }, "resolvedSourceVersion":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the resolved version of this batch build's source code.

  • For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.

  • For CodePipeline, the source revision provided by CodePipeline.

  • For Amazon S3, this does not apply.

" + "documentation":"

The identifier of the resolved version of this batch build's source code.

  • For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.

  • For AWS CodePipeline, the source revision provided by AWS CodePipeline.

  • For Amazon S3, this does not apply.

" }, "projectName":{ "shape":"NonEmptyString", @@ -1025,7 +1035,7 @@ }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", - "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

" + "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

  • For AWS CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

" }, "artifacts":{ "shape":"BuildArtifacts", @@ -1056,12 +1066,12 @@ }, "initiator":{ "shape":"String", - "documentation":"

The entity that started the batch build. Valid values include:

  • If CodePipeline started the build, the pipeline's name (for example, codepipeline/my-demo-pipeline).

  • If an Identity and Access Management user started the build, the user's name.

  • If the Jenkins plugin for CodeBuild started the build, the string CodeBuild-Jenkins-Plugin.

" + "documentation":"

The entity that started the batch build. Valid values include:

  • If AWS CodePipeline started the build, the pipeline's name (for example, codepipeline/my-demo-pipeline).

  • If an AWS Identity and Access Management (IAM) user started the build, the user's name.

  • If the Jenkins plugin for AWS CodeBuild started the build, the string CodeBuild-Jenkins-Plugin.

" }, "vpcConfig":{"shape":"VpcConfig"}, "encryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The Key Management Service customer master key (CMK) to be used for encrypting the batch build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the batch build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "buildBatchNumber":{ "shape":"WrapperLong", @@ -1267,7 +1277,7 @@ "documentation":"

Specifies the target url of the build status CodeBuild sends to the source provider. The usage of this parameter depends on the source provider.

Bitbucket

This parameter is used for the url parameter in the Bitbucket commit status. For more information, see build in the Bitbucket API documentation.

GitHub/GitHub Enterprise Server

This parameter is used for the target_url parameter in the GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.

" } }, - "documentation":"

Contains information that defines how the CodeBuild build project reports the build status to the source provider.

" + "documentation":"

Contains information that defines how the AWS CodeBuild build project reports the build status to the source provider.

" }, "BuildSummaries":{ "type":"list", @@ -1329,18 +1339,18 @@ "members":{ "status":{ "shape":"LogsConfigStatusType", - "documentation":"

The current status of the logs in CloudWatch Logs for a build project. Valid values are:

  • ENABLED: CloudWatch Logs are enabled for this build project.

  • DISABLED: CloudWatch Logs are not enabled for this build project.

" + "documentation":"

The current status of the logs in Amazon CloudWatch Logs for a build project. Valid values are:

  • ENABLED: Amazon CloudWatch Logs are enabled for this build project.

  • DISABLED: Amazon CloudWatch Logs are not enabled for this build project.

" }, "groupName":{ "shape":"String", - "documentation":"

The group name of the logs in CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

" + "documentation":"

The group name of the logs in Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

" }, "streamName":{ "shape":"String", - "documentation":"

The prefix of the stream name of the CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

" + "documentation":"

The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

" } }, - "documentation":"

Information about CloudWatch Logs for a build project.

" + "documentation":"

Information about Amazon CloudWatch Logs for a build project.

" }, "CodeCoverage":{ "type":"structure", @@ -1463,7 +1473,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For AWS CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -1487,11 +1497,11 @@ }, "serviceRole":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

" + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.

" }, "timeoutInMinutes":{ "shape":"TimeOut", - "documentation":"

How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.

" + "documentation":"

How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.

" }, "queuedTimeoutInMinutes":{ "shape":"TimeOut", @@ -1499,15 +1509,15 @@ }, "encryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "tags":{ "shape":"TagList", - "documentation":"

A list of tag key and value pairs associated with this build project.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

" + "documentation":"

A list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" }, "vpcConfig":{ "shape":"VpcConfig", - "documentation":"

VpcConfig enables CodeBuild to access resources in an Amazon VPC.

" + "documentation":"

VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.

" }, "badgeEnabled":{ "shape":"WrapperBoolean", @@ -1515,7 +1525,7 @@ }, "logsConfig":{ "shape":"LogsConfig", - "documentation":"

Information about logs for the build project. These can be logs in CloudWatch Logs, logs uploaded to a specified S3 bucket, or both.

" + "documentation":"

Information about logs for the build project. These can be logs in Amazon CloudWatch Logs, logs uploaded to a specified S3 bucket, or both.

" }, "fileSystemLocations":{ "shape":"ProjectFileSystemLocations", @@ -1562,7 +1572,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.

" + "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" } } }, @@ -1581,7 +1591,7 @@ "members":{ "projectName":{ "shape":"ProjectName", - "documentation":"

The name of the CodeBuild project.

" + "documentation":"

The name of the AWS CodeBuild project.

" }, "branchFilter":{ "shape":"String", @@ -1602,7 +1612,7 @@ "members":{ "webhook":{ "shape":"Webhook", - "documentation":"

Information about a webhook that connects repository events to a build project in CodeBuild.

" + "documentation":"

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

" } } }, @@ -1740,7 +1750,7 @@ "members":{ "projectName":{ "shape":"ProjectName", - "documentation":"

The name of the CodeBuild project.

" + "documentation":"

The name of the AWS CodeBuild project.

" } } }, @@ -1847,7 +1857,7 @@ "documentation":"

A list of environment image versions.

" } }, - "documentation":"

Information about a Docker image that is managed by CodeBuild.

" + "documentation":"

Information about a Docker image that is managed by AWS CodeBuild.

" }, "EnvironmentImages":{ "type":"list", @@ -1865,7 +1875,7 @@ "documentation":"

The list of Docker images that are related by the specified programming language.

" } }, - "documentation":"

A set of Docker images that are related by programming language and are managed by CodeBuild.

" + "documentation":"

A set of Docker images that are related by programming language and are managed by AWS CodeBuild.

" }, "EnvironmentLanguages":{ "type":"list", @@ -1883,7 +1893,7 @@ "documentation":"

The list of programming languages that are available for the specified platform.

" } }, - "documentation":"

A set of Docker images that are related by platform and are managed by CodeBuild.

" + "documentation":"

A set of Docker images that are related by platform and are managed by AWS CodeBuild.

" }, "EnvironmentPlatforms":{ "type":"list", @@ -1912,11 +1922,11 @@ }, "value":{ "shape":"String", - "documentation":"

The value of the environment variable.

We strongly discourage the use of PLAINTEXT environment variables to store sensitive values, especially Amazon Web Services secret key IDs and secret access keys. PLAINTEXT environment variables can be displayed in plain text using the CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER.

" + "documentation":"

The value of the environment variable.

We strongly discourage the use of PLAINTEXT environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER.

" }, "type":{ "shape":"EnvironmentVariableType", - "documentation":"

The type of environment variable. Valid values include:

  • PARAMETER_STORE: An environment variable stored in Systems Manager Parameter Store. To learn how to specify a parameter store environment variable, see env/parameter-store in the CodeBuild User Guide.

  • PLAINTEXT: An environment variable in plain text format. This is the default value.

  • SECRETS_MANAGER: An environment variable stored in Secrets Manager. To learn how to specify a secrets manager environment variable, see env/secrets-manager in the CodeBuild User Guide.

" + "documentation":"

The type of environment variable. Valid values include:

  • PARAMETER_STORE: An environment variable stored in Amazon EC2 Systems Manager Parameter Store. To learn how to specify a parameter store environment variable, see env/parameter-store in the AWS CodeBuild User Guide.

  • PLAINTEXT: An environment variable in plain text format. This is the default value.

  • SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager. To learn how to specify a secrets manager environment variable, see env/secrets-manager in the AWS CodeBuild User Guide.

" } }, "documentation":"

Information about an environment variable for a build project or a build.

" @@ -1945,7 +1955,7 @@ "documentation":"

The value assigned to the exported environment variable.

" } }, - "documentation":"

Contains information about an exported environment variable.

Exported environment variables are used in conjunction with CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the CodePipeline User Guide.

During a build, the value of a variable is available starting with the install phase. It can be updated between the start of the install phase and the end of the post_build phase. After the post_build phase ends, the value of exported variables cannot change.

" + "documentation":"

Contains information about an exported environment variable.

Exported environment variables are used in conjunction with AWS CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the AWS CodePipeline User Guide.

During a build, the value of a variable is available starting with the install phase. It can be updated between the start of the install phase and the end of the post_build phase. After the post_build phase ends, the value of exported variables cannot change.

" }, "ExportedEnvironmentVariables":{ "type":"list", @@ -2026,10 +2036,10 @@ "members":{ "fetchSubmodules":{ "shape":"WrapperBoolean", - "documentation":"

Set to true to fetch Git submodules for your CodeBuild build project.

" + "documentation":"

Set to true to fetch Git submodules for your AWS CodeBuild build project.

" } }, - "documentation":"

Information about the Git submodules configuration for a CodeBuild build project.

" + "documentation":"

Information about the Git submodules configuration for an AWS CodeBuild build project.

" }, "Identifiers":{ "type":"list", @@ -2068,7 +2078,7 @@ }, "authType":{ "shape":"AuthType", - "documentation":"

The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the CodeBuild console.

" + "documentation":"

The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the AWS CodeBuild console.

" }, "shouldOverwrite":{ "shape":"WrapperBoolean", @@ -2098,7 +2108,7 @@ "members":{ "projectName":{ "shape":"NonEmptyString", - "documentation":"

The name of the CodeBuild build project that the cache is reset for.

" + "documentation":"

The name of the AWS CodeBuild build project that the cache is reset for.

" } } }, @@ -2206,7 +2216,7 @@ "members":{ "projectName":{ "shape":"NonEmptyString", - "documentation":"

The name of the CodeBuild project.

" + "documentation":"

The name of the AWS CodeBuild project.

" }, "sortOrder":{ "shape":"SortOrderType", @@ -2267,7 +2277,7 @@ "members":{ "platforms":{ "shape":"EnvironmentPlatforms", - "documentation":"

Information about supported platforms for Docker images that are managed by CodeBuild.

" + "documentation":"

Information about supported platforms for Docker images that are managed by AWS CodeBuild.

" } } }, @@ -2331,7 +2341,7 @@ }, "reportGroups":{ "shape":"ReportGroupArns", - "documentation":"

The list of ARNs for the report groups in the current Amazon Web Services account.

" + "documentation":"

The list of ARNs for the report groups in the current AWS account.

" } } }, @@ -2404,7 +2414,7 @@ }, "reports":{ "shape":"ReportArns", - "documentation":"

The list of returned ARNs for the reports in the current Amazon Web Services account.

" + "documentation":"

The list of returned ARNs for the reports in the current AWS account.

" } } }, @@ -2413,7 +2423,7 @@ "members":{ "sortBy":{ "shape":"SharedResourceSortByType", - "documentation":"

The criterion to be used to list build projects shared with the current Amazon Web Services account or user. Valid values include:

  • ARN: List based on the ARN.

  • MODIFIED_TIME: List based on when information about the shared project was last changed.

" + "documentation":"

The criterion to be used to list build projects shared with the current AWS account or user. Valid values include:

  • ARN: List based on the ARN.

  • MODIFIED_TIME: List based on when information about the shared project was last changed.

" }, "sortOrder":{ "shape":"SortOrderType", @@ -2438,7 +2448,7 @@ }, "projects":{ "shape":"ProjectArns", - "documentation":"

The list of ARNs for the build projects shared with the current Amazon Web Services account or user.

" + "documentation":"

The list of ARNs for the build projects shared with the current AWS account or user.

" } } }, @@ -2451,7 +2461,7 @@ }, "sortBy":{ "shape":"SharedResourceSortByType", - "documentation":"

The criterion to be used to list report groups shared with the current Amazon Web Services account or user. Valid values include:

  • ARN: List based on the ARN.

  • MODIFIED_TIME: List based on when information about the shared report group was last changed.

" + "documentation":"

The criterion to be used to list report groups shared with the current AWS account or user. Valid values include:

  • ARN: List based on the ARN.

  • MODIFIED_TIME: List based on when information about the shared report group was last changed.

" }, "nextToken":{ "shape":"String", @@ -2472,7 +2482,7 @@ }, "reportGroups":{ "shape":"ReportGroupArns", - "documentation":"

The list of ARNs for the report groups shared with the current Amazon Web Services account or user.

" + "documentation":"

The list of ARNs for the report groups shared with the current AWS account or user.

" } } }, @@ -2495,14 +2505,14 @@ "members":{ "cloudWatchLogs":{ "shape":"CloudWatchLogsConfig", - "documentation":"

Information about CloudWatch Logs for a build project. CloudWatch Logs are enabled by default.

" + "documentation":"

Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch Logs are enabled by default.

" }, "s3Logs":{ "shape":"S3LogsConfig", "documentation":"

Information about logs built to an S3 bucket for a build project. S3 logs are not enabled by default.

" } }, - "documentation":"

Information about logs for a build project. These can be logs in CloudWatch Logs, built in a specified S3 bucket, or both.

" + "documentation":"

Information about logs for a build project. These can be logs in Amazon CloudWatch Logs, built in a specified S3 bucket, or both.

" }, "LogsConfigStatusType":{ "type":"string", @@ -2516,15 +2526,15 @@ "members":{ "groupName":{ "shape":"String", - "documentation":"

The name of the CloudWatch Logs group for the build logs.

" + "documentation":"

The name of the Amazon CloudWatch Logs group for the build logs.

" }, "streamName":{ "shape":"String", - "documentation":"

The name of the CloudWatch Logs stream for the build logs.

" + "documentation":"

The name of the Amazon CloudWatch Logs stream for the build logs.

" }, "deepLink":{ "shape":"String", - "documentation":"

The URL to an individual build log in CloudWatch Logs.

" + "documentation":"

The URL to an individual build log in Amazon CloudWatch Logs.

" }, "s3DeepLink":{ "shape":"String", @@ -2532,7 +2542,7 @@ }, "cloudWatchLogsArn":{ "shape":"String", - "documentation":"

The ARN of CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}. For more information, see Resources Defined by CloudWatch Logs.

" + "documentation":"

The ARN of Amazon CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}. For more information, see Resources Defined by Amazon CloudWatch Logs.

" }, "s3LogsArn":{ "shape":"String", @@ -2540,14 +2550,14 @@ }, "cloudWatchLogs":{ "shape":"CloudWatchLogsConfig", - "documentation":"

Information about CloudWatch Logs for a build project.

" + "documentation":"

Information about Amazon CloudWatch Logs for a build project.

" }, "s3Logs":{ "shape":"S3LogsConfig", "documentation":"

Information about S3 logs for a build project.

" } }, - "documentation":"

Information about build logs in CloudWatch Logs.

" + "documentation":"

Information about build logs in Amazon CloudWatch Logs.

" }, "NetworkInterface":{ "type":"structure", @@ -2640,7 +2650,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For AWS CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -2664,11 +2674,11 @@ }, "serviceRole":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

" + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.

" }, "timeoutInMinutes":{ "shape":"TimeOut", - "documentation":"

How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes.

" + "documentation":"

How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes.

" }, "queuedTimeoutInMinutes":{ "shape":"TimeOut", @@ -2676,11 +2686,11 @@ }, "encryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>). If you don't specify a value, CodeBuild uses the managed CMK for Amazon Simple Storage Service (Amazon S3).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "tags":{ "shape":"TagList", - "documentation":"

A list of tag key and value pairs associated with this build project.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

" + "documentation":"

A list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" }, "created":{ "shape":"Timestamp", @@ -2692,11 +2702,11 @@ }, "webhook":{ "shape":"Webhook", - "documentation":"

Information about a webhook that connects repository events to a build project in CodeBuild.

" + "documentation":"

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

" }, "vpcConfig":{ "shape":"VpcConfig", - "documentation":"

Information about the VPC configuration that CodeBuild accesses.

" + "documentation":"

Information about the VPC configuration that AWS CodeBuild accesses.

" }, "badge":{ "shape":"ProjectBadge", @@ -2704,7 +2714,7 @@ }, "logsConfig":{ "shape":"LogsConfig", - "documentation":"

Information about logs for the build project. A project can create logs in CloudWatch Logs, an S3 bucket, or both.

" + "documentation":"

Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, an S3 bucket, or both.

" }, "fileSystemLocations":{ "shape":"ProjectFileSystemLocations", @@ -2733,27 +2743,27 @@ "members":{ "type":{ "shape":"ArtifactsType", - "documentation":"

The type of build output artifact. Valid values include:

  • CODEPIPELINE: The build project has build output generated through CodePipeline.

    The CODEPIPELINE type is not supported for secondaryArtifacts.

  • NO_ARTIFACTS: The build project does not produce any build output.

  • S3: The build project stores build output in Amazon S3.

" + "documentation":"

The type of build output artifact. Valid values include:

  • CODEPIPELINE: The build project has build output generated through AWS CodePipeline.

    The CODEPIPELINE type is not supported for secondaryArtifacts.

  • NO_ARTIFACTS: The build project does not produce any build output.

  • S3: The build project stores build output in Amazon S3.

" }, "location":{ "shape":"String", - "documentation":"

Information about the build output artifact location:

  • If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output locations instead of CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, this is the name of the output bucket.

" + "documentation":"

Information about the build output artifact location:

  • If type is set to CODEPIPELINE, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, this is the name of the output bucket.

" }, "path":{ "shape":"String", - "documentation":"

Along with namespaceType and name, the pattern that CodeBuild uses to name and store the output artifact:

  • If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output names instead of CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, this is the path to the output artifact. If path is not specified, path is not used.

For example, if path is set to MyArtifacts, namespaceType is set to NONE, and name is set to MyArtifact.zip, the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip.

" + "documentation":"

Along with namespaceType and name, the pattern that AWS CodeBuild uses to name and store the output artifact:

  • If type is set to CODEPIPELINE, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, this is the path to the output artifact. If path is not specified, path is not used.

For example, if path is set to MyArtifacts, namespaceType is set to NONE, and name is set to MyArtifact.zip, the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip.

" }, "namespaceType":{ "shape":"ArtifactNamespace", - "documentation":"

Along with path and name, the pattern that CodeBuild uses to determine the name and location to store the output artifact:

  • If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output names instead of CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, valid values include:

    • BUILD_ID: Include the build ID in the location of the build output artifact.

    • NONE: Do not include the build ID. This is the default if namespaceType is not specified.

For example, if path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to MyArtifact.zip, the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip.

" + "documentation":"

Along with path and name, the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:

  • If type is set to CODEPIPELINE, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, valid values include:

    • BUILD_ID: Include the build ID in the location of the build output artifact.

    • NONE: Do not include the build ID. This is the default if namespaceType is not specified.

For example, if path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to MyArtifact.zip, the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip.

" }, "name":{ "shape":"String", - "documentation":"

Along with path and namespaceType, the pattern that CodeBuild uses to name and store the output artifact:

  • If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output names instead of CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, this is the name of the output artifact object. If you set the name to be a forward slash (\"/\"), the artifact is stored in the root of the output bucket.

For example:

  • If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to MyArtifact.zip, then the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip.

  • If path is empty, namespaceType is set to NONE, and name is set to \"/\", the output artifact is stored in the root of the output bucket.

  • If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to \"/\", the output artifact is stored in MyArtifacts/<build-ID>.

" + "documentation":"

Along with path and namespaceType, the pattern that AWS CodeBuild uses to name and store the output artifact:

  • If type is set to CODEPIPELINE, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, this is the name of the output artifact object. If you set the name to be a forward slash (\"/\"), the artifact is stored in the root of the output bucket.

For example:

  • If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to MyArtifact.zip, then the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip.

  • If path is empty, namespaceType is set to NONE, and name is set to \"/\", the output artifact is stored in the root of the output bucket.

  • If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to \"/\", the output artifact is stored in MyArtifacts/<build-ID>.

" }, "packaging":{ "shape":"ArtifactPackaging", - "documentation":"

The type of build output artifact to create:

  • If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output artifacts instead of CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, valid values include:

    • NONE: CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging is not specified.

    • ZIP: CodeBuild creates in the output bucket a ZIP file that contains the build output.

" + "documentation":"

The type of build output artifact to create:

  • If type is set to CODEPIPELINE, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.

  • If type is set to NO_ARTIFACTS, this value is ignored if specified, because no build output is produced.

  • If type is set to S3, valid values include:

    • NONE: AWS CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging is not specified.

    • ZIP: AWS CodeBuild creates in the output bucket a ZIP file that contains the build output.

" }, "overrideArtifactName":{ "shape":"WrapperBoolean", @@ -2766,7 +2776,8 @@ "artifactIdentifier":{ "shape":"String", "documentation":"

An identifier for this artifact definition.

" - } + }, + "bucketOwnerAccess":{"shape":"BucketOwnerAccess"} }, "documentation":"

Information about the build output artifacts for the build project.

" }, @@ -2850,15 +2861,15 @@ "members":{ "type":{ "shape":"EnvironmentType", - "documentation":"

The type of build environment to use for related builds.

  • The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt).

  • The environment type LINUX_CONTAINER with compute type build.general1.2xlarge is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

  • The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China (Beijing), and China (Ningxia).

  • The environment types WINDOWS_CONTAINER and WINDOWS_SERVER_2019_CONTAINER are available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and EU (Ireland).

For more information, see Build environment compute types in the CodeBuild user guide.

" + "documentation":"

The type of build environment to use for related builds.

  • The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt).

  • The environment type LINUX_CONTAINER with compute type build.general1.2xlarge is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

  • The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

" }, "image":{ "shape":"NonEmptyString", - "documentation":"

The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:

  • For an image tag: <registry>/<repository>:<tag>. For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be aws/codebuild/standard:4.0.

  • For an image digest: <registry>/<repository>@<digest>. For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use <registry>/<repository>@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf.

For more information, see Docker images provided by CodeBuild in the CodeBuild user guide.

" + "documentation":"

The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:

  • For an image tag: <registry>/<repository>:<tag>. For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be aws/codebuild/standard:4.0.

  • For an image digest: <registry>/<repository>@<digest>. For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use <registry>/<repository>@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf.

" }, "computeType":{ "shape":"ComputeType", - "documentation":"

Information about the compute resources the build project uses. Available values include:

  • BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds.

  • BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds.

  • BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.

  • BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.

If you use BUILD_GENERAL1_LARGE:

  • For environment type LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for builds.

  • For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.

  • For environment type ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.

For more information, see Build Environment Compute Types in the CodeBuild User Guide.

" + "documentation":"

Information about the compute resources the build project uses. Available values include:

  • BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds.

  • BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds.

  • BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.

  • BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.

If you use BUILD_GENERAL1_LARGE:

  • For environment type LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for builds.

  • For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.

  • For environment type ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.

For more information, see Build Environment Compute Types in the AWS CodeBuild User Guide.

" }, "environmentVariables":{ "shape":"EnvironmentVariables", @@ -2870,7 +2881,7 @@ }, "certificate":{ "shape":"String", - "documentation":"

The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the CodeBuild User Guide.

" + "documentation":"

The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the AWS CodeBuild User Guide.

" }, "registryCredential":{ "shape":"RegistryCredential", @@ -2878,7 +2889,7 @@ }, "imagePullCredentialsType":{ "shape":"ImagePullCredentialsType", - "documentation":"

The type of credentials CodeBuild uses to pull images in your build. There are two valid values:

  • CODEBUILD specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild's service principal.

  • SERVICE_ROLE specifies that CodeBuild uses your build project's service role.

When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use a CodeBuild curated image, you must use CODEBUILD credentials.

" + "documentation":"

The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:

  • CODEBUILD specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild's service principal.

  • SERVICE_ROLE specifies that AWS CodeBuild uses your build project's service role.

When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials.

" } }, "documentation":"

Information about the build environment of the build project.

" @@ -2892,7 +2903,7 @@ }, "location":{ "shape":"String", - "documentation":"

A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name of file system when you view it in the Amazon EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory.

The directory path in the format efs-dns-name:/directory-path is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.

" + "documentation":"

A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name of a file system when you view it in the Amazon EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory.

The directory path in the format efs-dns-name:/directory-path is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.

" }, "mountPoint":{ "shape":"String", @@ -2904,7 +2915,7 @@ }, "mountOptions":{ "shape":"String", - "documentation":"

The mount options for a file system created by Amazon EFS. The default mount options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2. For more information, see Recommended NFS Mount Options.

" + "documentation":"

The mount options for a file system created by Amazon EFS. The default mount options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2. For more information, see Recommended NFS Mount Options.

" } }, "documentation":"

Information about a file system created by Amazon Elastic File System (EFS). For more information, see What Is Amazon Elastic File System?

" @@ -2945,11 +2956,11 @@ "members":{ "type":{ "shape":"SourceType", - "documentation":"

The type of repository that contains the source code to be built. Valid values include:

  • BITBUCKET: The source code is in a Bitbucket repository.

  • CODECOMMIT: The source code is in a CodeCommit repository.

  • CODEPIPELINE: The source code settings are specified in the source action of a pipeline in CodePipeline.

  • GITHUB: The source code is in a GitHub or GitHub Enterprise Cloud repository.

  • GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise Server repository.

  • NO_SOURCE: The project does not have input source code.

  • S3: The source code is in an Amazon S3 bucket.

" + "documentation":"

The type of repository that contains the source code to be built. Valid values include:

  • BITBUCKET: The source code is in a Bitbucket repository.

  • CODECOMMIT: The source code is in an AWS CodeCommit repository.

  • CODEPIPELINE: The source code settings are specified in the source action of a pipeline in AWS CodePipeline.

  • GITHUB: The source code is in a GitHub or GitHub Enterprise Cloud repository.

  • GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise Server repository.

  • NO_SOURCE: The project does not have input source code.

  • S3: The source code is in an Amazon S3 bucket.

" }, "location":{ "shape":"String", - "documentation":"

Information about the location of the source code to be built. Valid values include:

  • For source code settings that are specified in the source action of a pipeline in CodePipeline, location should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in a CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>).

  • For source code in an Amazon S3 input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip).

    • The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your account to your GitHub account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.

If you specify CODEPIPELINE for the Type property, don't specify this property. For all of the other types, you must specify Location.

" + "documentation":"

Information about the location of the source code to be built. Valid values include:

  • For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>).

  • For source code in an Amazon S3 input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip).

    • The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.

" }, "gitCloneDepth":{ "shape":"GitCloneDepth", @@ -2961,15 +2972,15 @@ }, "buildspec":{ "shape":"String", - "documentation":"

The buildspec file declaration to use for the builds in this build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

" + "documentation":"

The buildspec file declaration to use for the builds in this build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

" }, "auth":{ "shape":"SourceAuth", - "documentation":"

Information about the authorization settings for CodeBuild to access the source code to be built.

This information is for the CodeBuild console's use only. Your code should not get or set this information directly.

" + "documentation":"

Information about the authorization settings for AWS CodeBuild to access the source code to be built.

This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.

" }, "reportBuildStatus":{ "shape":"WrapperBoolean", - "documentation":"

Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

" + "documentation":"

Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the AWS CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

" }, "buildStatusConfig":{ "shape":"BuildStatusConfig", @@ -2999,7 +3010,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

The source version for the corresponding source identifier. If specified, must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

The source version for the corresponding source identifier. If specified, must be one of:

  • For AWS CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" } }, "documentation":"

A source identifier and its corresponding version.

" @@ -3023,7 +3034,7 @@ "members":{ "policy":{ "shape":"NonEmptyString", - "documentation":"

A JSON-formatted resource policy. For more information, see Sharing a Project and Sharing a Report Group in the CodeBuild User Guide.

" + "documentation":"

A JSON-formatted resource policy. For more information, see Sharing a Project and Sharing a Report Group in the AWS CodeBuild User Guide.

" }, "resourceArn":{ "shape":"NonEmptyString", @@ -3049,14 +3060,14 @@ "members":{ "credential":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Resource Name (ARN) or name of credentials created using Secrets Manager.

The credential can use the name of the credentials only if they exist in your current Region.

" + "documentation":"

The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager.

The credential can use the name of the credentials only if they exist in your current AWS Region.

" }, "credentialProvider":{ "shape":"CredentialProviderType", - "documentation":"

The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for Secrets Manager.

" + "documentation":"

The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for AWS Secrets Manager.

" } }, - "documentation":"

Information about credentials that provide access to a private Docker registry. When this is set:

  • imagePullCredentialsType must be set to SERVICE_ROLE.

  • images cannot be curated or an Amazon ECR image.

For more information, see Private Registry with Secrets Manager Sample for CodeBuild.

" + "documentation":"

Information about credentials that provide access to a private Docker registry. When this is set:

  • imagePullCredentialsType must be set to SERVICE_ROLE.

  • images cannot be curated or an Amazon ECR image.

For more information, see Private Registry with AWS Secrets Manager Sample for AWS CodeBuild.

" }, "Report":{ "type":"structure", @@ -3185,7 +3196,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.

" + "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" }, "status":{ "shape":"ReportGroupStatusType", @@ -3337,14 +3348,14 @@ "type":"structure", "members":{ }, - "documentation":"

The specified Amazon Web Services resource cannot be created, because an Amazon Web Services resource with the same settings already exists.

", + "documentation":"

The specified AWS resource cannot be created, because an AWS resource with the same settings already exists.

", "exception":true }, "ResourceNotFoundException":{ "type":"structure", "members":{ }, - "documentation":"

The specified Amazon Web Services resource cannot be found.

", + "documentation":"

The specified AWS resource cannot be found.

", "exception":true }, "RetryBuildBatchInput":{ @@ -3356,7 +3367,7 @@ }, "idempotencyToken":{ "shape":"String", - "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuildBatch request. The token is included in the RetryBuildBatch request and is valid for five minutes. If you repeat the RetryBuildBatch request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.

" + "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuildBatch request. The token is included in the RetryBuildBatch request and is valid for five minutes. If you repeat the RetryBuildBatch request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" }, "retryType":{ "shape":"RetryBuildBatchType", @@ -3386,7 +3397,7 @@ }, "idempotencyToken":{ "shape":"String", - "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuild request. The token is included in the RetryBuild request and is valid for five minutes. If you repeat the RetryBuild request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.

" + "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuild request. The token is included in the RetryBuild request and is valid for five minutes. If you repeat the RetryBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" } } }, @@ -3411,7 +3422,8 @@ "encryptionDisabled":{ "shape":"WrapperBoolean", "documentation":"

Set to true if you do not want your S3 build log output encrypted. By default S3 build logs are encrypted.

" - } + }, + "bucketOwnerAccess":{"shape":"BucketOwnerAccess"} }, "documentation":"

Information about S3 logs for a build project.

" }, @@ -3424,7 +3436,7 @@ }, "bucketOwner":{ "shape":"String", - "documentation":"

The Amazon Web Services account identifier of the owner of the Amazon S3 bucket. This allows report data to be exported to an Amazon S3 bucket that is owned by an account other than the account running the build.

" + "documentation":"

The AWS account identifier of the owner of the Amazon S3 bucket. This allows report data to be exported to an Amazon S3 bucket that is owned by an account other than the account running the build.

" }, "path":{ "shape":"String", @@ -3432,7 +3444,7 @@ }, "packaging":{ "shape":"ReportPackagingType", - "documentation":"

The type of build output artifact to create. Valid values include:

  • NONE: CodeBuild creates the raw data in the output bucket. This is the default if packaging is not specified.

  • ZIP: CodeBuild creates a ZIP file with the raw data in the output bucket.

" + "documentation":"

The type of build output artifact to create. Valid values include:

  • NONE: AWS CodeBuild creates the raw data in the output bucket. This is the default if packaging is not specified.

  • ZIP: AWS CodeBuild creates a ZIP file with the raw data in the output bucket.

" }, "encryptionKey":{ "shape":"NonEmptyString", @@ -3490,7 +3502,7 @@ "documentation":"

The resource value that applies to the specified authorization type.

" } }, - "documentation":"

Information about the authorization settings for CodeBuild to access the source code to be built.

This information is for the CodeBuild console's use only. Your code should not get or set this information directly.

" + "documentation":"

Information about the authorization settings for AWS CodeBuild to access the source code to be built.

This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.

" }, "SourceAuthType":{ "type":"string", @@ -3548,7 +3560,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

The version of the batch build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

The version of the batch build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

AWS CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "artifactsOverride":{ "shape":"ProjectArtifacts", @@ -3584,7 +3596,7 @@ }, "buildspecOverride":{ "shape":"String", - "documentation":"

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

" + "documentation":"

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

" }, "insecureSslOverride":{ "shape":"WrapperBoolean", @@ -3632,11 +3644,11 @@ }, "encryptionKeyOverride":{ "shape":"NonEmptyString", - "documentation":"

The Key Management Service customer master key (CMK) that overrides the one specified in the batch build project. The CMK key encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides the one specified in the batch build project. The CMK key encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "idempotencyToken":{ "shape":"String", - "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuildBatch request. The token is included in the StartBuildBatch request and is valid for five minutes. If you repeat the StartBuildBatch request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.

" + "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuildBatch request. The token is included in the StartBuildBatch request and is valid for five minutes. If you repeat the StartBuildBatch request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" }, "logsConfigOverride":{ "shape":"LogsConfig", @@ -3648,7 +3660,7 @@ }, "imagePullCredentialsTypeOverride":{ "shape":"ImagePullCredentialsType", - "documentation":"

The type of credentials CodeBuild uses to pull images in your batch build. There are two valid values:

CODEBUILD

Specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild's service principal.

SERVICE_ROLE

Specifies that CodeBuild uses your build project's service role.

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an CodeBuild curated image, you must use CODEBUILD credentials.

" + "documentation":"

The type of credentials AWS CodeBuild uses to pull images in your batch build. There are two valid values:

CODEBUILD

Specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild's service principal.

SERVICE_ROLE

Specifies that AWS CodeBuild uses your build project's service role.

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD credentials.

" }, "buildBatchConfigOverride":{ "shape":"ProjectBuildBatchConfig", @@ -3675,7 +3687,7 @@ "members":{ "projectName":{ "shape":"NonEmptyString", - "documentation":"

The name of the CodeBuild build project to start running a build.

" + "documentation":"

The name of the AWS CodeBuild build project to start running a build.

" }, "secondarySourcesOverride":{ "shape":"ProjectSources", @@ -3687,7 +3699,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

AWS CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "artifactsOverride":{ "shape":"ProjectArtifacts", @@ -3719,11 +3731,11 @@ }, "gitSubmodulesConfigOverride":{ "shape":"GitSubmodulesConfig", - "documentation":"

Information about the Git submodules configuration for this build of an CodeBuild build project.

" + "documentation":"

Information about the Git submodules configuration for this build of an AWS CodeBuild build project.

" }, "buildspecOverride":{ "shape":"String", - "documentation":"

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

" + "documentation":"

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

" }, "insecureSslOverride":{ "shape":"WrapperBoolean", @@ -3731,7 +3743,7 @@ }, "reportBuildStatusOverride":{ "shape":"WrapperBoolean", - "documentation":"

Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

" + "documentation":"

Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the AWS CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

" }, "buildStatusConfigOverride":{ "shape":"BuildStatusConfig", @@ -3775,11 +3787,11 @@ }, "encryptionKeyOverride":{ "shape":"NonEmptyString", - "documentation":"

The Key Management Service customer master key (CMK) that overrides the one specified in the build project. The CMK key encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides the one specified in the build project. The CMK key encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "idempotencyToken":{ "shape":"String", - "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 5 minutes. If you repeat the StartBuild request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.

" + "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 5 minutes. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" }, "logsConfigOverride":{ "shape":"LogsConfig", @@ -3791,7 +3803,7 @@ }, "imagePullCredentialsTypeOverride":{ "shape":"ImagePullCredentialsType", - "documentation":"

The type of credentials CodeBuild uses to pull images in your build. There are two valid values:

CODEBUILD

Specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild's service principal.

SERVICE_ROLE

Specifies that CodeBuild uses your build project's service role.

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an CodeBuild curated image, you must use CODEBUILD credentials.

" + "documentation":"

The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:

CODEBUILD

Specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild's service principal.

SERVICE_ROLE

Specifies that AWS CodeBuild uses your build project's service role.

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD credentials.

" }, "debugSessionEnabled":{ "shape":"WrapperBoolean", @@ -3872,7 +3884,7 @@ "documentation":"

The tag's value.

" } }, - "documentation":"

A tag, consisting of a key and a value.

This tag is available for use by Amazon Web Services services that support tags in CodeBuild.

" + "documentation":"

A tag, consisting of a key and a value.

This tag is available for use by AWS services that support tags in AWS CodeBuild.

" }, "TagList":{ "type":"list", @@ -3987,7 +3999,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

  • For AWS CodeCommit: the commit ID, branch, or Git tag to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -4011,11 +4023,11 @@ }, "serviceRole":{ "shape":"NonEmptyString", - "documentation":"

The replacement ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

" + "documentation":"

The replacement ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.

" }, "timeoutInMinutes":{ "shape":"TimeOut", - "documentation":"

The replacement value in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed.

" + "documentation":"

The replacement value in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed.

" }, "queuedTimeoutInMinutes":{ "shape":"TimeOut", @@ -4023,15 +4035,15 @@ }, "encryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "tags":{ "shape":"TagList", - "documentation":"

An updated list of tag key and value pairs associated with this build project.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

" + "documentation":"

An updated list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" }, "vpcConfig":{ "shape":"VpcConfig", - "documentation":"

VpcConfig enables CodeBuild to access resources in an Amazon VPC.

" + "documentation":"

VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.

" }, "badgeEnabled":{ "shape":"WrapperBoolean", @@ -4039,7 +4051,7 @@ }, "logsConfig":{ "shape":"LogsConfig", - "documentation":"

Information about logs for the build project. A project can create logs in CloudWatch Logs, logs in an S3 bucket, or both.

" + "documentation":"

Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, logs in an S3 bucket, or both.

" }, "fileSystemLocations":{ "shape":"ProjectFileSystemLocations", @@ -4075,7 +4087,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

An updated list of tag key and value pairs associated with this report group.

These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.

" + "documentation":"

An updated list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" } } }, @@ -4094,7 +4106,7 @@ "members":{ "projectName":{ "shape":"ProjectName", - "documentation":"

The name of the CodeBuild project.

" + "documentation":"

The name of the AWS CodeBuild project.

" }, "branchFilter":{ "shape":"String", @@ -4119,7 +4131,7 @@ "members":{ "webhook":{ "shape":"Webhook", - "documentation":"

Information about a repository's webhook that is associated with a project in CodeBuild.

" + "documentation":"

Information about a repository's webhook that is associated with a project in AWS CodeBuild.

" } } }, @@ -4145,7 +4157,7 @@ "documentation":"

A list of one or more security groups IDs in your Amazon VPC.

" } }, - "documentation":"

Information about the VPC configuration that CodeBuild accesses.

" + "documentation":"

Information about the VPC configuration that AWS CodeBuild accesses.

" }, "Webhook":{ "type":"structure", @@ -4156,7 +4168,7 @@ }, "payloadUrl":{ "shape":"NonEmptyString", - "documentation":"

The CodeBuild endpoint where webhook events are sent.

" + "documentation":"

The AWS CodeBuild endpoint where webhook events are sent.

" }, "secret":{ "shape":"NonEmptyString", @@ -4179,7 +4191,7 @@ "documentation":"

A timestamp that indicates the last time a repository's secret token was modified.

" } }, - "documentation":"

Information about a webhook that connects repository events to a build project in CodeBuild.

" + "documentation":"

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

" }, "WebhookBuildType":{ "type":"string", @@ -4225,5 +4237,5 @@ "WrapperInt":{"type":"integer"}, "WrapperLong":{"type":"long"} }, - "documentation":"

CodeBuild is a fully managed build service in the cloud. CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in CodeBuild to use your own build tools. CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about CodeBuild, see the CodeBuild User Guide.

" + "documentation":"AWS CodeBuild

AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.

" } diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index 248fc42b88be..68b1b0624722 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index 140c63b7fe68..3bf1d8ffde4b 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index 5671e5c1540d..f8be37af4729 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index a1136b7c013e..5827e2629174 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 5d990232e753..6a4e2154b1f4 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 4baaf2eec395..8ac0484a3df8 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codestar AWS Java SDK :: Services :: AWS 
CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index 098117ac6349..3ca3b6254748 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index 4de4e5454ba8..70167039f1e0 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index d55d6614869a..e704d8b685ea 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index 30a3d8e5cded..bf5a0c120ce1 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json index 3cd80f8d8d9a..e65cd8da315c 100755 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json @@ -96,7 +96,7 @@ 
{"shape":"UnsupportedUserStateException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new user in the specified user pool.

If MessageAction is not set, the default is to send a welcome message via email or phone (SMS).

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password.

Alternatively, you can call AdminCreateUser with “SUPPRESS” for the MessageAction parameter, and Amazon Cognito will not send any email.

In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password.

AdminCreateUser requires developer credentials.

" + "documentation":"

Creates a new user in the specified user pool.

If MessageAction is not set, the default is to send a welcome message via email or phone (SMS).

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password.

Alternatively, you can call AdminCreateUser with “SUPPRESS” for the MessageAction parameter, and Amazon Cognito will not send any email.

In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password.

AdminCreateUser requires developer credentials.

" }, "AdminDeleteUser":{ "name":"AdminDeleteUser", @@ -267,7 +267,7 @@ {"shape":"UserNotFoundException"}, {"shape":"UserNotConfirmedException"} ], - "documentation":"

Initiates the authentication flow, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" + "documentation":"

Initiates the authentication flow, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" }, "AdminLinkProviderForUser":{ "name":"AdminLinkProviderForUser", @@ -384,7 +384,7 @@ {"shape":"InvalidSmsRoleTrustRelationshipException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Resets the specified user's password in a user pool as an administrator. Works on any user.

When a developer calls this API, the current password is invalidated, so it must be changed. If a user tries to sign in after the API is called, the app will get a PasswordResetRequiredException exception back and should direct the user down the flow to reset the password, which is the same as the forgot password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" + "documentation":"

Resets the specified user's password in a user pool as an administrator. Works on any user.

When a developer calls this API, the current password is invalidated, so it must be changed. If a user tries to sign in after the API is called, the app will get a PasswordResetRequiredException exception back and should direct the user down the flow to reset the password, which is the same as the forgot password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" }, "AdminRespondToAuthChallenge":{ "name":"AdminRespondToAuthChallenge", @@ -416,7 +416,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"SoftwareTokenMFANotFoundException"} ], - "documentation":"

Responds to an authentication challenge, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" + "documentation":"

Responds to an authentication challenge, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" }, "AdminSetUserMFAPreference":{ "name":"AdminSetUserMFAPreference", @@ -534,7 +534,7 @@ {"shape":"InvalidEmailRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} ], - "documentation":"

Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.

For custom attributes, you must prepend the custom: prefix to the attribute name.

In addition to updating user attributes, this API can also be used to mark phone and email as verified.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" + "documentation":"

Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.

For custom attributes, you must prepend the custom: prefix to the attribute name.

In addition to updating user attributes, this API can also be used to mark phone and email as verified.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" }, "AdminUserGlobalSignOut":{ "name":"AdminUserGlobalSignOut", @@ -768,7 +768,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new Amazon Cognito user pool and sets the password policy for the pool.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

" + "documentation":"

Creates a new Amazon Cognito user pool and sets the password policy for the pool.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

" }, "CreateUserPoolClient":{ "name":"CreateUserPoolClient", @@ -1111,7 +1111,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. If neither a verified phone number nor a verified email exists, an InvalidParameterException is thrown. To use the confirmation code for resetting the password, call ConfirmForgotPassword.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. If neither a verified phone number nor a verified email exists, an InvalidParameterException is thrown. To use the confirmation code for resetting the password, call ConfirmForgotPassword.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "GetCSVHeader":{ @@ -1265,7 +1265,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Gets the user attribute verification code for the specified attribute name.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Gets the user attribute verification code for the specified attribute name.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "GetUserPoolMfaConfig":{ @@ -1328,7 +1328,7 @@ {"shape":"InvalidSmsRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} ], - "documentation":"

Initiates the authentication flow.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Initiates the authentication flow.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "ListDevices":{ @@ -1468,7 +1468,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the user pools associated with an AWS account.

" + "documentation":"

Lists the user pools associated with an account.

" }, "ListUsers":{ "name":"ListUsers", @@ -1528,7 +1528,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Resends the confirmation (for confirmation of registration) to a specific user in the user pool.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Resends the confirmation (for confirmation of registration) to a specific user in the user pool.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "RespondToAuthChallenge":{ @@ -1561,7 +1561,7 @@ {"shape":"InternalErrorException"}, {"shape":"SoftwareTokenMFANotFoundException"} ], - "documentation":"

Responds to the authentication challenge.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Responds to the authentication challenge.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "RevokeToken":{ @@ -1655,7 +1655,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Set the user pool multi-factor authentication (MFA) configuration.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

" + "documentation":"

Set the user pool multi-factor authentication (MFA) configuration.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

" }, "SetUserSettings":{ "name":"SetUserSettings", @@ -1701,7 +1701,7 @@ {"shape":"InvalidEmailRoleAccessPolicyException"}, {"shape":"CodeDeliveryFailureException"} ], - "documentation":"

Registers the user in the specified user pool and creates a user name, password, and user attributes.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Registers the user in the specified user pool and creates a user name, password, and user attributes.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "StartUserImportJob":{ @@ -1829,7 +1829,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates the specified group with the specified attributes.

Calling this action requires developer credentials.

If you don't provide a value for an attribute, it will be set to the default value.

" + "documentation":"

Updates the specified group with the specified attributes.

Calling this action requires developer credentials.

" }, "UpdateIdentityProvider":{ "name":"UpdateIdentityProvider", @@ -1894,7 +1894,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Allows a user to update a specific attribute (one at a time).

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Allows a user to update a specific attribute (one at a time).

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "UpdateUserPool":{ @@ -1918,7 +1918,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InvalidEmailRoleAccessPolicyException"} ], - "documentation":"

Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool. If you don't provide a value for an attribute, it will be set to the default value.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other AWS service, Amazon SNS might place your account in SMS sandbox. In sandbox mode , you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

" + "documentation":"

Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool. If you don't provide a value for an attribute, it will be set to the default value.

This action might generate an SMS text message. Starting June 1, 2021, U.S. telecom carriers require that you register an origination phone number before you can send SMS messages to U.S. phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Cognito will use the registered number automatically. Otherwise, Cognito users that must receive SMS messages might be unable to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon SNS might place your account in SMS sandbox. In sandbox mode, you’ll have limitations, such as sending messages to only verified phone numbers. After testing in the sandbox environment, you can move out of the SMS sandbox and into production. For more information, see SMS message settings for Cognito User Pools in the Amazon Cognito Developer Guide.

" }, "UpdateUserPoolClient":{ "name":"UpdateUserPoolClient", @@ -1955,7 +1955,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.

You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You cannot use it to change the domain for a user pool.

A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with AWS Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.

Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.

However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.

When you add your new certificate in ACM, you must choose US East (N. Virginia) as the AWS Region.

After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.

For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.

" + "documentation":"

Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.

You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You cannot use it to change the domain for a user pool.

A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.

Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.

However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.

When you add your new certificate in ACM, you must choose US East (N. Virginia) as the Region.

After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.

For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.

" }, "VerifySoftwareToken":{ "name":"VerifySoftwareToken", @@ -2148,7 +2148,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

If your user pool configuration includes triggers, the AdminConfirmSignUp API action invokes the AWS Lambda function that is specified for the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. In this payload, the clientMetadata attribute provides the data that you assigned to the ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in AWS Lambda, you can process the ClientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

If your user pool configuration includes triggers, the AdminConfirmSignUp API action invokes the Lambda function that is specified for the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. In this payload, the clientMetadata attribute provides the data that you assigned to the ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in Lambda, you can process the ClientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to confirm user registration.

" @@ -2218,7 +2218,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the pre sign-up trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the pre sign-up trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to create a user in the specified user pool.

" @@ -2499,7 +2499,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminInitiateAuth API action, Amazon Cognito invokes the AWS Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers:

  • Pre signup

  • Pre authentication

  • User migration

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminInitiateAuth request. In your function code in AWS Lambda, you can process the validationData value to enhance your workflow for your specific needs.

When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it does not provide the ClientMetadata value as input:

  • Post authentication

  • Custom message

  • Pre token generation

  • Create auth challenge

  • Define auth challenge

  • Verify auth challenge

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminInitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers:

  • Pre signup

  • Pre authentication

  • User migration

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminInitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs.

When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it does not provide the ClientMetadata value as input:

  • Post authentication

  • Custom message

  • Pre token generation

  • Create auth challenge

  • Define auth challenge

  • Verify auth challenge

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" }, "AnalyticsMetadata":{ "shape":"AnalyticsMetadataType", @@ -2716,7 +2716,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminResetUserPassword API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminResetUserPassword API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to reset a user's password as an administrator.

" @@ -2765,7 +2765,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, post authentication, user migration, pre token generation, define auth challenge, create auth challenge, and verify auth challenge response. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, post authentication, user migration, pre token generation, define auth challenge, create auth challenge, and verify auth challenge response. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

The request to respond to the authentication challenge, as an administrator.

" @@ -2969,7 +2969,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to update the user's attributes as an administrator.

" @@ -3549,7 +3549,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

The request representing the confirmation for a password reset.

" @@ -3598,7 +3598,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to confirm registration of a user.

" @@ -3868,7 +3868,7 @@ }, "AllowedOAuthScopes":{ "shape":"ScopeListType", - "documentation":"

The allowed OAuth scopes. Possible values provided by OAuth are: phone, email, openid, and profile. Possible values provided by AWS are: aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported.

" + "documentation":"

The allowed OAuth scopes. Possible values provided by OAuth are: phone, email, openid, and profile. Possible values provided by Amazon Web Services are: aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported.

" }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", @@ -3943,7 +3943,7 @@ }, "LambdaConfig":{ "shape":"LambdaConfigType", - "documentation":"

The Lambda trigger configuration information for the new user pool.

In a push model, event sources (such as Amazon S3 and custom applications) need permission to invoke a function. So you will need to make an extra call to add permission for these event sources to invoke your Lambda function.

For more information on using the Lambda API to add permission, see AddPermission .

For adding permission using the AWS CLI, see add-permission .

" + "documentation":"

The Lambda trigger configuration information for the new user pool.

In a push model, event sources (such as Amazon S3 and custom applications) need permission to invoke a function. So you will need to make an extra call to add permission for these event sources to invoke your Lambda function.

For more information on using the Lambda API to add permission, see AddPermission .

For adding permission using the CLI, see add-permission .

" }, "AutoVerifiedAttributes":{ "shape":"VerifiedAttributesListType", @@ -4048,7 +4048,7 @@ "members":{ "CertificateArn":{ "shape":"ArnType", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Certificate Manager SSL certificate. You use this certificate for the subdomain of your custom domain.

" + "documentation":"

The Amazon Resource Name (ARN) of a Certificate Manager SSL certificate. You use this certificate for the subdomain of your custom domain.

" } }, "documentation":"

The configuration for a custom domain that hosts the sign-up and sign-in webpages for your application.

" @@ -4516,7 +4516,7 @@ }, "AWSAccountId":{ "shape":"AWSAccountIdType", - "documentation":"

The AWS account ID for the user pool owner.

" + "documentation":"

The account ID for the user pool owner.

" }, "Domain":{ "shape":"DomainType", @@ -4591,7 +4591,7 @@ }, "EmailSendingAccount":{ "shape":"EmailSendingAccountType", - "documentation":"

Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:

COGNITO_DEFAULT

When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.

To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide.

The default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

If EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't allowed:

  • EmailVerificationMessage

  • EmailVerificationSubject

  • InviteMessageTemplate.EmailMessage

  • InviteMessageTemplate.EmailSubject

  • VerificationMessageTemplate.EmailMessage

  • VerificationMessageTemplate.EmailMessageByLink

  • VerificationMessageTemplate.EmailSubject,

  • VerificationMessageTemplate.EmailSubjectByLink

DEVELOPER EmailSendingAccount is required.

DEVELOPER

When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account.

If you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

Before Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role, which is a type of IAM role, in your AWS account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide.

" + "documentation":"

Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:

COGNITO_DEFAULT

When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.

To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide.

The default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

If EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't allowed:

  • EmailVerificationMessage

  • EmailVerificationSubject

  • InviteMessageTemplate.EmailMessage

  • InviteMessageTemplate.EmailSubject

  • VerificationMessageTemplate.EmailMessage

  • VerificationMessageTemplate.EmailMessageByLink

  • VerificationMessageTemplate.EmailSubject,

  • VerificationMessageTemplate.EmailSubjectByLink

DEVELOPER EmailSendingAccount is required.

DEVELOPER

When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your account.

If you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

Before Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role, which is a type of IAM role, in your account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide.

" }, "From":{ "shape":"StringType", @@ -4599,7 +4599,7 @@ }, "ConfigurationSet":{ "shape":"SESConfigurationSet", - "documentation":"

The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:

  • Event publishing – Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.

  • IP pool management – When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.

" + "documentation":"

The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:

  • Event publishing – Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other Amazon Web Services services such as SNS and CloudWatch.

  • IP pool management – When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.

" } }, "documentation":"

The email configuration type.

Amazon Cognito has specific regions for use with Amazon SES. For more information on the supported regions, see Email Settings for Amazon Cognito User Pools.

" @@ -4834,7 +4834,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and user migration. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ForgotPassword request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and user migration. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to reset a user's password.

" @@ -5016,7 +5016,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your GetUserAttributeVerificationCode request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your GetUserAttributeVerificationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to get user attribute verification.

" @@ -5275,7 +5275,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the InitiateAuth API action, Amazon Cognito invokes the AWS Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers:

  • Pre signup

  • Pre authentication

  • User migration

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your InitiateAuth request. In your function code in AWS Lambda, you can process the validationData value to enhance your workflow for your specific needs.

When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it does not provide the ClientMetadata value as input:

  • Post authentication

  • Custom message

  • Pre token generation

  • Create auth challenge

  • Define auth challenge

  • Verify auth challenge

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the InitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers:

  • Pre signup

  • Pre authentication

  • User migration

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your InitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs.

When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it does not provide the ClientMetadata value as input:

  • Post authentication

  • Custom message

  • Pre token generation

  • Create auth challenge

  • Define auth challenge

  • Verify auth challenge

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" }, "ClientId":{ "shape":"ClientIdType", @@ -5343,10 +5343,10 @@ "members":{ "message":{ "shape":"MessageType", - "documentation":"

The message returned when the Amazon Cognito service throws an invalid AWS Lambda response exception.

" + "documentation":"

The message returned when the Amazon Cognito service throws an invalid Lambda response exception.

" } }, - "documentation":"

This exception is thrown when the Amazon Cognito service encounters an invalid AWS Lambda response.

", + "documentation":"

This exception is thrown when the Amazon Cognito service encounters an invalid Lambda response.

", "exception":true }, "InvalidOAuthFlowException":{ @@ -5398,7 +5398,7 @@ "documentation":"

The message returned when the role trust relationship for the SMS message is invalid.

" } }, - "documentation":"

This exception is thrown when the trust relationship is invalid for the role provided for SMS configuration. This can happen if you do not trust cognito-idp.amazonaws.com or the external ID provided in the role does not match what is provided in the SMS configuration for the user pool.

", + "documentation":"

This exception is thrown when the trust relationship is invalid for the role provided for SMS configuration. This can happen if you do not trust cognito-idp.amazonaws.com or the external ID provided in the role does not match what is provided in the SMS configuration for the user pool.

", "exception":true }, "InvalidUserPoolConfigurationException":{ @@ -5417,23 +5417,23 @@ "members":{ "PreSignUp":{ "shape":"ArnType", - "documentation":"

A pre-registration AWS Lambda trigger.

" + "documentation":"

A pre-registration Lambda trigger.

" }, "CustomMessage":{ "shape":"ArnType", - "documentation":"

A custom Message AWS Lambda trigger.

" + "documentation":"

A custom Message Lambda trigger.

" }, "PostConfirmation":{ "shape":"ArnType", - "documentation":"

A post-confirmation AWS Lambda trigger.

" + "documentation":"

A post-confirmation Lambda trigger.

" }, "PreAuthentication":{ "shape":"ArnType", - "documentation":"

A pre-authentication AWS Lambda trigger.

" + "documentation":"

A pre-authentication Lambda trigger.

" }, "PostAuthentication":{ "shape":"ArnType", - "documentation":"

A post-authentication AWS Lambda trigger.

" + "documentation":"

A post-authentication Lambda trigger.

" }, "DefineAuthChallenge":{ "shape":"ArnType", @@ -5457,18 +5457,18 @@ }, "CustomSMSSender":{ "shape":"CustomSMSLambdaVersionConfigType", - "documentation":"

A custom SMS sender AWS Lambda trigger.

" + "documentation":"

A custom SMS sender Lambda trigger.

" }, "CustomEmailSender":{ "shape":"CustomEmailLambdaVersionConfigType", - "documentation":"

A custom email sender AWS Lambda trigger.

" + "documentation":"

A custom email sender Lambda trigger.

" }, "KMSKeyID":{ "shape":"ArnType", "documentation":"

The Amazon Resource Name of Key Management Service Customer master keys . Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender and CustomSMSSender.

" } }, - "documentation":"

Specifies the configuration for AWS Lambda triggers.

" + "documentation":"

Specifies the configuration for Lambda triggers.

" }, "LimitExceededException":{ "type":"structure", @@ -5478,7 +5478,7 @@ "documentation":"

The message returned when Amazon Cognito throws a limit exceeded exception.

" } }, - "documentation":"

This exception is thrown when a user exceeds the limit for a requested AWS resource.

", + "documentation":"

This exception is thrown when a user exceeds the limit for a requested Amazon Web Services resource.

", "exception":true }, "ListDevicesRequest":{ @@ -6229,7 +6229,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to resend the confirmation code.

" @@ -6360,7 +6360,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: post authentication, pre token generation, define auth challenge, create auth challenge, and verify auth challenge. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: post authentication, pre token generation, define auth challenge, create auth challenge, and verify auth challenge. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

The request to respond to an authentication challenge.

" @@ -6784,7 +6784,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and post confirmation. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your SignUp request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and post confirmation. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your SignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to register a user.

" @@ -6822,14 +6822,14 @@ "members":{ "SnsCallerArn":{ "shape":"ArnType", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages. SMS messages are subject to a spending limit.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your account which Cognito will use to send SMS messages. SMS messages are subject to a spending limit.

" }, "ExternalId":{ "shape":"StringType", - "documentation":"

The external ID is a value that we recommend you use to add security to your IAM role which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an ExternalId, the Cognito User Pool will include it when attempting to assume your IAM role, so that you can set your roles trust policy to require the ExternalID. If you use the Cognito Management Console to create a role for SMS MFA, Cognito will create a role with the required permissions and a trust policy that demonstrates use of the ExternalId.

For more information about the ExternalId of a role, see How to use an external ID when granting access to your AWS resources to a third party

" + "documentation":"

The external ID is a value that we recommend you use to add security to your IAM role which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an ExternalId, the Cognito User Pool will include it when attempting to assume your IAM role, so that you can set your role's trust policy to require the ExternalID. If you use the Cognito Management Console to create a role for SMS MFA, Cognito will create a role with the required permissions and a trust policy that demonstrates use of the ExternalId.

For more information about the ExternalId of a role, see How to use an external ID when granting access to your Amazon Web Services resources to a third party

" } }, - "documentation":"

The SMS configuration type that includes the settings the Cognito User Pool needs to call for the Amazon SNS service to send an SMS message from your AWS account. The Cognito User Pool makes the request to the Amazon SNS Service by using an AWS IAM role that you provide for your AWS account.

" + "documentation":"

The SMS configuration type that includes the settings the Cognito User Pool needs to call for the Amazon SNS service to send an SMS message from your account. The Cognito User Pool makes the request to the Amazon SNS Service by using an IAM role that you provide for your account.

" }, "SmsMfaConfigType":{ "type":"structure", @@ -7109,10 +7109,10 @@ "members":{ "message":{ "shape":"MessageType", - "documentation":"

The message returned when the Amazon Cognito service returns an unexpected AWS Lambda exception.

" + "documentation":"

The message returned when the Amazon Cognito service returns an unexpected Lambda exception.

" } }, - "documentation":"

This exception is thrown when the Amazon Cognito service encounters an unexpected exception with the AWS Lambda service.

", + "documentation":"

This exception is thrown when the Amazon Cognito service encounters an unexpected exception with the Lambda service.

", "exception":true }, "UnsupportedIdentityProviderException":{ @@ -7367,7 +7367,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

  • Amazon Cognito does not store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.

  • Amazon Cognito does not validate the ClientMetadata value.

  • Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.

" } }, "documentation":"

Represents the request to update user attributes.

" @@ -7451,7 +7451,7 @@ }, "AllowedOAuthScopes":{ "shape":"ScopeListType", - "documentation":"

The allowed OAuth scopes. Possible values provided by OAuth are: phone, email, openid, and profile. Possible values provided by AWS are: aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported.

" + "documentation":"

The allowed OAuth scopes. Possible values provided by OAuth are: phone, email, openid, and profile. Possible values provided by Amazon Web Services are: aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported.

" }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", @@ -7529,7 +7529,7 @@ }, "LambdaConfig":{ "shape":"LambdaConfigType", - "documentation":"

The AWS Lambda configuration information from the request to update the user pool.

" + "documentation":"

The Lambda configuration information from the request to update the user pool.

" }, "AutoVerifiedAttributes":{ "shape":"VerifiedAttributesListType", @@ -7715,10 +7715,10 @@ "members":{ "message":{ "shape":"MessageType", - "documentation":"

The message returned when the Amazon Cognito service returns a user validation exception with the AWS Lambda service.

" + "documentation":"

The message returned when the Amazon Cognito service returns a user validation exception with the Lambda service.

" } }, - "documentation":"

This exception is thrown when the Amazon Cognito service encounters a user validation exception with the AWS Lambda service.

", + "documentation":"

This exception is thrown when the Amazon Cognito service encounters a user validation exception with the Lambda service.

", "exception":true }, "UserMFASettingListType":{ @@ -7865,7 +7865,7 @@ }, "AllowedOAuthScopes":{ "shape":"ScopeListType", - "documentation":"

The allowed OAuth scopes. Possible values provided by OAuth are: phone, email, openid, and profile. Possible values provided by AWS are: aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported.

" + "documentation":"

The allowed OAuth scopes. Possible values provided by OAuth are: phone, email, openid, and profile. Possible values provided by Amazon Web Services are: aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported.

" }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", @@ -7900,7 +7900,7 @@ }, "LambdaConfig":{ "shape":"LambdaConfigType", - "documentation":"

The AWS Lambda configuration information in a user pool description.

" + "documentation":"

The Lambda configuration information in a user pool description.

" }, "Status":{ "shape":"StatusType", @@ -7985,7 +7985,7 @@ }, "LambdaConfig":{ "shape":"LambdaConfigType", - "documentation":"

The AWS Lambda triggers associated with the user pool.

" + "documentation":"

The Lambda triggers associated with the user pool.

" }, "Status":{ "shape":"StatusType", @@ -8061,7 +8061,7 @@ }, "SmsConfigurationFailure":{ "shape":"StringType", - "documentation":"

The reason why the SMS configuration cannot send the messages to your users.

This message might include comma-separated values to describe why your SMS configuration can't send messages to user pool end users.

  • InvalidSmsRoleAccessPolicyException - The IAM role which Cognito uses to send SMS messages is not properly configured. For more information, see SmsConfigurationType.

  • SNSSandbox - The AWS account is in SNS Sandbox and messages won’t reach unverified end users. This parameter won’t get populated with SNSSandbox if the IAM user creating the user pool doesn’t have SNS permissions. To learn how to move your AWS account out of the sandbox, see Moving out of the SMS sandbox.

" + "documentation":"

The reason why the SMS configuration cannot send the messages to your users.

This message might include comma-separated values to describe why your SMS configuration can't send messages to user pool end users.

  • InvalidSmsRoleAccessPolicyException - The IAM role which Cognito uses to send SMS messages is not properly configured. For more information, see SmsConfigurationType.

  • SNSSandbox - The account is in SNS Sandbox and messages won’t reach unverified end users. This parameter won’t get populated with SNSSandbox if the IAM user creating the user pool doesn’t have SNS permissions. To learn how to move your account out of the sandbox, see Moving out of the SMS sandbox.

" }, "EmailConfigurationFailure":{ "shape":"StringType", diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index bb99f78547dd..231271d52154 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index 2698aa35d49e..9a33b06f23cb 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index 0951dd3607ec..b65afd2bad73 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index a68292e590b4..70c07a7e3ba4 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json index eb0d41c7d0c7..c4d37866f21a 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -367,7 +367,7 @@ }, "performanceRisk":{ "shape":"PerformanceRisk", - "documentation":"

The performance risk of the Auto Scaling group configuration recommendation.

Performance risk indicates the likelihood of the recommended instance type not meeting the resource needs of your workload. Compute Optimizer calculates an individual performance risk score for each specification of the recommended instance, including CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput, and network PPS. The performance risk of the recommended instance is calculated as the maximum performance risk score across the analyzed resource specifications.

The value ranges from 0 to 5, with 0 meaning that the recommended resource is predicted to always provide enough hardware capability. The higher the performance risk is, the more likely you should validate whether the recommended resource meets the performance requirements of your workload before migrating your resource.

" + "documentation":"

The performance risk of the Auto Scaling group configuration recommendation.

Performance risk indicates the likelihood of the recommended instance type not meeting the resource needs of your workload. Compute Optimizer calculates an individual performance risk score for each specification of the recommended instance, including CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput, and network PPS. The performance risk of the recommended instance is calculated as the maximum performance risk score across the analyzed resource specifications.

The value ranges from 0 - 4, with 0 meaning that the recommended resource is predicted to always provide enough hardware capability. The higher the performance risk is, the more likely you should validate whether the recommendation will meet the performance requirements of your workload before migrating your resource.

" }, "rank":{ "shape":"Rank", @@ -1263,7 +1263,7 @@ }, "performanceRisk":{ "shape":"PerformanceRisk", - "documentation":"

The performance risk of the instance recommendation option.

Performance risk indicates the likelihood of the recommended instance type not meeting the resource needs of your workload. Compute Optimizer calculates an individual performance risk score for each specification of the recommended instance, including CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput, and network PPS. The performance risk of the recommended instance is calculated as the maximum performance risk score across the analyzed resource specifications.

The value ranges from 0 to 5, with 0 meaning that the recommended resource is predicted to always provide enough hardware capability. The higher the performance risk is, the more likely you should validate whether the recommendation will meet the performance requirements of your workload before migrating your resource.

" + "documentation":"

The performance risk of the instance recommendation option.

Performance risk indicates the likelihood of the recommended instance type not meeting the resource needs of your workload. Compute Optimizer calculates an individual performance risk score for each specification of the recommended instance, including CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput, and network PPS. The performance risk of the recommended instance is calculated as the maximum performance risk score across the analyzed resource specifications.

The value ranges from 0 - 4, with 0 meaning that the recommended resource is predicted to always provide enough hardware capability. The higher the performance risk is, the more likely you should validate whether the recommendation will meet the performance requirements of your workload before migrating your resource.

" }, "rank":{ "shape":"Rank", @@ -2021,7 +2021,7 @@ }, "performanceRisk":{ "shape":"PerformanceRisk", - "documentation":"

The performance risk of the volume recommendation option.

Performance risk is the likelihood of the recommended volume type meeting the performance requirement of your workload.

The value ranges from 0 to 5, with 0 meaning that the recommended resource is predicted to always provide enough hardware capability. The higher the performance risk is, the more likely you should validate whether the recommendation will meet the performance requirements of your workload before migrating your resource.

" + "documentation":"

The performance risk of the volume recommendation option.

Performance risk is the likelihood of the recommended volume type meeting the performance requirement of your workload.

The value ranges from 0 - 4, with 0 meaning that the recommended resource is predicted to always provide enough hardware capability. The higher the performance risk is, the more likely you should validate whether the recommendation will meet the performance requirements of your workload before migrating your resource.

" }, "rank":{ "shape":"Rank", diff --git a/services/config/pom.xml b/services/config/pom.xml index 16978af0ee75..bce3cefe2d00 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/connect/pom.xml b/services/connect/pom.xml index f75eb0d91dfc..34fcb3ce8a7f 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/paginators-1.json b/services/connect/src/main/resources/codegen-resources/paginators-1.json index c7e821823a96..c26c20308b81 100644 --- a/services/connect/src/main/resources/codegen-resources/paginators-1.json +++ b/services/connect/src/main/resources/codegen-resources/paginators-1.json @@ -10,6 +10,12 @@ "limit_key": "MaxResults", "output_token": "NextToken" }, + "ListAgentStatuses": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AgentStatusSummaryList" + }, "ListApprovedOrigins": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index 064d695034e4..a78fedb04a4d 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -155,6 +155,25 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Associates a security key to the instance.

" }, + "CreateAgentStatus":{ + "name":"CreateAgentStatus", + "http":{ + "method":"PUT", + "requestUri":"/agent-status/{InstanceId}" + }, + "input":{"shape":"CreateAgentStatusRequest"}, + "output":{"shape":"CreateAgentStatusResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Creates an agent status for the specified Amazon Connect instance.

" + }, "CreateContactFlow":{ "name":"CreateContactFlow", "http":{ @@ -175,6 +194,25 @@ ], "documentation":"

Creates a contact flow for the specified Amazon Connect instance.

You can also create and update contact flows using the Amazon Connect Flow language.

" }, + "CreateHoursOfOperation":{ + "name":"CreateHoursOfOperation", + "http":{ + "method":"PUT", + "requestUri":"/hours-of-operations/{InstanceId}" + }, + "input":{"shape":"CreateHoursOfOperationRequest"}, + "output":{"shape":"CreateHoursOfOperationResponse"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Creates hours of operation.

" + }, "CreateInstance":{ "name":"CreateInstance", "http":{ @@ -321,6 +359,22 @@ ], "documentation":"

Creates a new user hierarchy group.

" }, + "DeleteHoursOfOperation":{ + "name":"DeleteHoursOfOperation", + "http":{ + "method":"DELETE", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}" + }, + "input":{"shape":"DeleteHoursOfOperationRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Deletes an hours of operation.

" + }, "DeleteInstance":{ "name":"DeleteInstance", "http":{ @@ -414,6 +468,23 @@ ], "documentation":"

Deletes an existing user hierarchy group. It must not be associated with any agents or have any active child groups.

" }, + "DescribeAgentStatus":{ + "name":"DescribeAgentStatus", + "http":{ + "method":"GET", + "requestUri":"/agent-status/{InstanceId}/{AgentStatusId}" + }, + "input":{"shape":"DescribeAgentStatusRequest"}, + "output":{"shape":"DescribeAgentStatusResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Describes an agent status.

" + }, "DescribeContactFlow":{ "name":"DescribeContactFlow", "http":{ @@ -794,6 +865,23 @@ ], "documentation":"

Gets historical metric data from the specified Amazon Connect instance.

For a description of each historical metric, see Historical Metrics Definitions in the Amazon Connect Administrator Guide.

" }, + "ListAgentStatuses":{ + "name":"ListAgentStatuses", + "http":{ + "method":"GET", + "requestUri":"/agent-status/{InstanceId}" + }, + "input":{"shape":"ListAgentStatusRequest"}, + "output":{"shape":"ListAgentStatusResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Lists agent statuses.

" + }, "ListApprovedOrigins":{ "name":"ListApprovedOrigins", "http":{ @@ -1325,7 +1413,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Adds the specified tags to the specified resource.

The supported resource types are users, routing profiles, queues, quick connects, and contact flows.

For sample policies that use tags, see Amazon Connect Identity-Based Policy Examples in the Amazon Connect Administrator Guide.

" + "documentation":"

Adds the specified tags to the specified resource.

The supported resource types are users, routing profiles, queues, quick connects, contact flows, agent status, and hours of operation.

For sample policies that use tags, see Amazon Connect Identity-Based Policy Examples in the Amazon Connect Administrator Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -1343,6 +1431,24 @@ ], "documentation":"

Removes the specified tags from the specified resource.

" }, + "UpdateAgentStatus":{ + "name":"UpdateAgentStatus", + "http":{ + "method":"POST", + "requestUri":"/agent-status/{InstanceId}/{AgentStatusId}" + }, + "input":{"shape":"UpdateAgentStatusRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Updates agent status.

" + }, "UpdateContactAttributes":{ "name":"UpdateContactAttributes", "http":{ @@ -1393,6 +1499,23 @@ ], "documentation":"

The name of the contact flow.

You can also create and update contact flows using the Amazon Connect Flow language.

" }, + "UpdateHoursOfOperation":{ + "name":"UpdateHoursOfOperation", + "http":{ + "method":"POST", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}" + }, + "input":{"shape":"UpdateHoursOfOperationRequest"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Updates the hours of operation.

" + }, "UpdateInstanceAttribute":{ "name":"UpdateInstanceAttribute", "http":{ @@ -1734,6 +1857,106 @@ "max":100, "min":1 }, + "AgentStatus":{ + "type":"structure", + "members":{ + "AgentStatusARN":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the agent status.

" + }, + "AgentStatusId":{ + "shape":"AgentStatusId", + "documentation":"

The identifier of the agent status.

" + }, + "Name":{ + "shape":"AgentStatusName", + "documentation":"

The name of the agent status.

" + }, + "Description":{ + "shape":"AgentStatusDescription", + "documentation":"

The description of the agent status.

" + }, + "Type":{ + "shape":"AgentStatusType", + "documentation":"

The type of agent status.

" + }, + "DisplayOrder":{ + "shape":"AgentStatusOrderNumber", + "documentation":"

The display order of the agent status.

" + }, + "State":{ + "shape":"AgentStatusState", + "documentation":"

The state of the agent status.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + } + }, + "documentation":"

Contains information about an agent status.

" + }, + "AgentStatusDescription":{ + "type":"string", + "max":250, + "min":1 + }, + "AgentStatusId":{"type":"string"}, + "AgentStatusName":{ + "type":"string", + "max":127, + "min":1 + }, + "AgentStatusOrderNumber":{ + "type":"integer", + "max":50, + "min":1 + }, + "AgentStatusState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "AgentStatusSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"AgentStatusId", + "documentation":"

The identifier for an agent status.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the agent status.

" + }, + "Name":{ + "shape":"AgentStatusName", + "documentation":"

The name of the agent status.

" + }, + "Type":{ + "shape":"AgentStatusType", + "documentation":"

The type of the agent status.

" + } + }, + "documentation":"

Summary information for an agent status.

" + }, + "AgentStatusSummaryList":{ + "type":"list", + "member":{"shape":"AgentStatusSummary"} + }, + "AgentStatusType":{ + "type":"string", + "enum":[ + "ROUTABLE", + "CUSTOM", + "OFFLINE" + ] + }, + "AgentStatusTypes":{ + "type":"list", + "member":{"shape":"AgentStatusType"}, + "max":3 + }, "AgentUsername":{ "type":"string", "max":100, @@ -1970,6 +2193,7 @@ "member":{"shape":"Attribute"} }, "AutoAccept":{"type":"boolean"}, + "Boolean":{"type":"boolean"}, "BotName":{ "type":"string", "max":50 @@ -2024,11 +2248,6 @@ "type":"string", "max":500 }, - "CommonDescriptionLength250":{ - "type":"string", - "max":250, - "min":0 - }, "CommonNameLength127":{ "type":"string", "max":127, @@ -2163,6 +2382,56 @@ "key":{"shape":"ReferenceKey"}, "value":{"shape":"Reference"} }, + "CreateAgentStatusRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name", + "State" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "Name":{ + "shape":"AgentStatusName", + "documentation":"

The name of the status.

" + }, + "Description":{ + "shape":"AgentStatusDescription", + "documentation":"

The description of the status.

" + }, + "State":{ + "shape":"AgentStatusState", + "documentation":"

The state of the status.

" + }, + "DisplayOrder":{ + "shape":"AgentStatusOrderNumber", + "documentation":"

The display order of the status.

", + "box":true + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + } + } + }, + "CreateAgentStatusResponse":{ + "type":"structure", + "members":{ + "AgentStatusARN":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the agent status.

" + }, + "AgentStatusId":{ + "shape":"AgentStatusId", + "documentation":"

The identifier of the agent status.

" + } + } + }, "CreateContactFlowRequest":{ "type":"structure", "required":[ @@ -2213,6 +2482,56 @@ } } }, + "CreateHoursOfOperationRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name", + "TimeZone", + "Config" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "Name":{ + "shape":"CommonNameLength127", + "documentation":"

The name of the hours of operation.

" + }, + "Description":{ + "shape":"HoursOfOperationDescription", + "documentation":"

The description of the hours of operation.

" + }, + "TimeZone":{ + "shape":"TimeZone", + "documentation":"

The time zone of the hours of operation.

" + }, + "Config":{ + "shape":"HoursOfOperationConfigList", + "documentation":"

Configuration information for the hours of operation: day, start time, and end time.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + } + } + }, + "CreateHoursOfOperationResponse":{ + "type":"structure", + "members":{ + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

" + }, + "HoursOfOperationArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the hours of operation.

" + } + } + }, "CreateInstanceRequest":{ "type":"structure", "required":[ @@ -2721,6 +3040,27 @@ "max":9999, "min":0 }, + "DeleteHoursOfOperationRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

", + "location":"uri", + "locationName":"HoursOfOperationId" + } + } + }, "DeleteInstanceRequest":{ "type":"structure", "required":["InstanceId"], @@ -2845,6 +3185,36 @@ } } }, + "DescribeAgentStatusRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "AgentStatusId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "AgentStatusId":{ + "shape":"AgentStatusId", + "documentation":"

The identifier for the agent status.

", + "location":"uri", + "locationName":"AgentStatusId" + } + } + }, + "DescribeAgentStatusResponse":{ + "type":"structure", + "members":{ + "AgentStatus":{ + "shape":"AgentStatus", + "documentation":"

The agent status.

" + } + } + }, "DescribeContactFlowRequest":{ "type":"structure", "required":[ @@ -3922,7 +4292,7 @@ "documentation":"

The name for the hours of operation.

" }, "Description":{ - "shape":"CommonDescriptionLength250", + "shape":"HoursOfOperationDescription", "documentation":"

The description for the hours of operation.

" }, "TimeZone":{ @@ -3942,6 +4312,11 @@ }, "HoursOfOperationConfig":{ "type":"structure", + "required":[ + "Day", + "StartTime", + "EndTime" + ], "members":{ "Day":{ "shape":"HoursOfOperationDays", @@ -3960,7 +4335,9 @@ }, "HoursOfOperationConfigList":{ "type":"list", - "member":{"shape":"HoursOfOperationConfig"} + "member":{"shape":"HoursOfOperationConfig"}, + "max":100, + "min":0 }, "HoursOfOperationDays":{ "type":"string", @@ -3974,6 +4351,11 @@ "SATURDAY" ] }, + "HoursOfOperationDescription":{ + "type":"string", + "max":250, + "min":1 + }, "HoursOfOperationId":{"type":"string"}, "HoursOfOperationName":{"type":"string"}, "HoursOfOperationSummary":{ @@ -4000,14 +4382,20 @@ }, "HoursOfOperationTimeSlice":{ "type":"structure", + "required":[ + "Hours", + "Minutes" + ], "members":{ "Hours":{ "shape":"Hours24Format", - "documentation":"

The hours.

" + "documentation":"

The hours.

", + "box":true }, "Minutes":{ "shape":"MinutesLimit60", - "documentation":"

The minutes.

" + "documentation":"

The minutes.

", + "box":true } }, "documentation":"

The start time or end time for an hours of operation.

" @@ -4406,6 +4794,50 @@ "error":{"httpStatusCode":429}, "exception":true }, + "ListAgentStatusRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult1000", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "AgentStatusTypes":{ + "shape":"AgentStatusTypes", + "documentation":"

Available agent status types.

", + "location":"querystring", + "locationName":"AgentStatusTypes" + } + } + }, + "ListAgentStatusResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + }, + "AgentStatusSummaryList":{ + "shape":"AgentStatusSummaryList", + "documentation":"

A summary of agent statuses.

" + } + } + }, "ListApprovedOriginsRequest":{ "type":"structure", "required":["InstanceId"], @@ -6844,6 +7276,53 @@ } } }, + "UpdateAgentStatusDescription":{ + "type":"string", + "max":250, + "min":0 + }, + "UpdateAgentStatusRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "AgentStatusId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "AgentStatusId":{ + "shape":"AgentStatusId", + "documentation":"

The identifier of the agent status.

", + "location":"uri", + "locationName":"AgentStatusId" + }, + "Name":{ + "shape":"AgentStatusName", + "documentation":"

The name of the agent status.

" + }, + "Description":{ + "shape":"UpdateAgentStatusDescription", + "documentation":"

The description of the agent status.

" + }, + "State":{ + "shape":"AgentStatusState", + "documentation":"

The state of the agent status.

" + }, + "DisplayOrder":{ + "shape":"AgentStatusOrderNumber", + "documentation":"

The display order of the agent status.

", + "box":true + }, + "ResetOrderNumber":{ + "shape":"Boolean", + "documentation":"

A boolean value indicating whether to reset the display order of the agent status.

" + } + } + }, "UpdateContactAttributesRequest":{ "type":"structure", "required":[ @@ -6926,6 +7405,48 @@ } } }, + "UpdateHoursOfOperationDescription":{ + "type":"string", + "max":250, + "min":0 + }, + "UpdateHoursOfOperationRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier of the hours of operation.

", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "Name":{ + "shape":"CommonNameLength127", + "documentation":"

The name of the hours of operation.

" + }, + "Description":{ + "shape":"UpdateHoursOfOperationDescription", + "documentation":"

The description of the hours of operation.

" + }, + "TimeZone":{ + "shape":"TimeZone", + "documentation":"

The time zone of the hours of operation.

" + }, + "Config":{ + "shape":"HoursOfOperationConfigList", + "documentation":"

Configuration information of the hours of operation.

" + } + } + }, "UpdateInstanceAttributeRequest":{ "type":"structure", "required":[ diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index 3ba26c619843..821c4e1847e7 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index d25c16af5cf8..2083be507010 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index 8b3debc49f4d..7b82a6b38878 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index b0112b2057f5..6eb693fc8a65 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 costexplorer diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index d62498eec146..f490f0f07044 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 5e6343942a81..31f1ac726a3a 100644 --- 
a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databasemigration/src/main/resources/codegen-resources/service-2.json b/services/databasemigration/src/main/resources/codegen-resources/service-2.json index ce38472e84e9..7bcd6f601c29 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/service-2.json +++ b/services/databasemigration/src/main/resources/codegen-resources/service-2.json @@ -23,7 +23,7 @@ "errors":[ {"shape":"ResourceNotFoundFault"} ], - "documentation":"

Adds metadata tags to an AWS DMS resource, including replication instance, endpoint, security group, and migration task. These tags can also be used with cost allocation reporting to track cost associated with DMS resources, or used in a Condition statement in an IAM policy for DMS. For more information, see Tag data type description.

" + "documentation":"

Adds metadata tags to a DMS resource, including replication instance, endpoint, security group, and migration task. These tags can also be used with cost allocation reporting to track cost associated with DMS resources, or used in a Condition statement in an IAM policy for DMS. For more information, see Tag data type description.

" }, "ApplyPendingMaintenanceAction":{ "name":"ApplyPendingMaintenanceAction", @@ -70,7 +70,7 @@ {"shape":"AccessDeniedFault"}, {"shape":"S3AccessDeniedFault"} ], - "documentation":"

Creates an endpoint using the provided settings.

" + "documentation":"

Creates an endpoint using the provided settings.

For a MySQL source or target endpoint, don't explicitly specify the database using the DatabaseName request parameter on the CreateEndpoint API call. Specifying DatabaseName when you create a MySQL endpoint replicates all the task tables to this single database. For MySQL endpoints, you specify the database only when you specify the schema in the table-mapping rules of the DMS task.

" }, "CreateEventSubscription":{ "name":"CreateEventSubscription", @@ -92,7 +92,7 @@ {"shape":"KMSNotFoundFault"}, {"shape":"KMSThrottlingFault"} ], - "documentation":"

Creates an AWS DMS event notification subscription.

You can specify the type of source (SourceType) you want to be notified of, provide a list of AWS DMS source IDs (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. If you specify both the SourceType and SourceIds, such as SourceType = replication-instance and SourceIdentifier = my-replinstance, you will be notified of all the replication instance events for the specified source. If you specify a SourceType but don't specify a SourceIdentifier, you receive notice of the events for that source type for all your AWS DMS sources. If you don't specify either SourceType nor SourceIdentifier, you will be notified of events generated from all AWS DMS sources belonging to your customer account.

For more information about AWS DMS events, see Working with Events and Notifications in the AWS Database Migration Service User Guide.

" + "documentation":"

Creates a DMS event notification subscription.

You can specify the type of source (SourceType) you want to be notified of, provide a list of DMS source IDs (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. If you specify both the SourceType and SourceIds, such as SourceType = replication-instance and SourceIdentifier = my-replinstance, you will be notified of all the replication instance events for the specified source. If you specify a SourceType but don't specify a SourceIdentifier, you receive notice of the events for that source type for all your DMS sources. If you don't specify either SourceType or SourceIdentifier, you will be notified of events generated from all DMS sources belonging to your customer account.

For more information about DMS events, see Working with Events and Notifications in the Database Migration Service User Guide.

" }, "CreateReplicationInstance":{ "name":"CreateReplicationInstance", @@ -114,7 +114,7 @@ {"shape":"InvalidSubnet"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Creates the replication instance using the specified parameters.

AWS DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the AWS CLI and AWS DMS API. For information on the required permissions, see IAM Permissions Needed to Use AWS DMS.

" + "documentation":"

Creates the replication instance using the specified parameters.

DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the CLI and DMS API. For information on the required permissions, see IAM Permissions Needed to Use DMS.

" }, "CreateReplicationSubnetGroup":{ "name":"CreateReplicationSubnetGroup", @@ -207,7 +207,7 @@ {"shape":"ResourceNotFoundFault"}, {"shape":"InvalidResourceStateFault"} ], - "documentation":"

Deletes an AWS DMS event subscription.

" + "documentation":"

Deletes a DMS event subscription.

" }, "DeleteReplicationInstance":{ "name":"DeleteReplicationInstance", @@ -264,7 +264,7 @@ {"shape":"ResourceNotFoundFault"}, {"shape":"InvalidResourceStateFault"} ], - "documentation":"

Deletes the record of a single premigration assessment run.

This operation removes all metadata that AWS DMS maintains about this assessment run. However, the operation leaves untouched all information about this assessment run that is stored in your Amazon S3 bucket.

" + "documentation":"

Deletes the record of a single premigration assessment run.

This operation removes all metadata that DMS maintains about this assessment run. However, the operation leaves untouched all information about this assessment run that is stored in your Amazon S3 bucket.

" }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", @@ -274,7 +274,7 @@ }, "input":{"shape":"DescribeAccountAttributesMessage"}, "output":{"shape":"DescribeAccountAttributesResponse"}, - "documentation":"

Lists all of the AWS DMS attributes for a customer account. These attributes include AWS DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota, includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region.

This command does not take any parameters.

" + "documentation":"

Lists all of the DMS attributes for a customer account. These attributes include DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota, includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region.

This command does not take any parameters.

" }, "DescribeApplicableIndividualAssessments":{ "name":"DescribeApplicableIndividualAssessments", @@ -358,7 +358,7 @@ }, "input":{"shape":"DescribeEventCategoriesMessage"}, "output":{"shape":"DescribeEventCategoriesResponse"}, - "documentation":"

Lists categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in Working with Events and Notifications in the AWS Database Migration Service User Guide.

" + "documentation":"

Lists categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in Working with Events and Notifications in the Database Migration Service User Guide.

" }, "DescribeEventSubscriptions":{ "name":"DescribeEventSubscriptions", @@ -381,7 +381,7 @@ }, "input":{"shape":"DescribeEventsMessage"}, "output":{"shape":"DescribeEventsResponse"}, - "documentation":"

Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on AWS DMS events, see Working with Events and Notifications in the AWS Database Migration User Guide.

" + "documentation":"

Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on DMS events, see Working with Events and Notifications in the Database Migration Service User Guide.

" }, "DescribeOrderableReplicationInstances":{ "name":"DescribeOrderableReplicationInstances", @@ -471,7 +471,7 @@ "errors":[ {"shape":"ResourceNotFoundFault"} ], - "documentation":"

Returns the task assessment results from Amazon S3. This action always returns the latest results.

" + "documentation":"

Returns the task assessment results from the Amazon S3 bucket that DMS creates in your account. This action always returns the latest results.

For more information about DMS task assessments, see Creating a task assessment report in the Database Migration Service User Guide.

" }, "DescribeReplicationTaskAssessmentRuns":{ "name":"DescribeReplicationTaskAssessmentRuns", @@ -538,7 +538,7 @@ {"shape":"ResourceNotFoundFault"}, {"shape":"InvalidResourceStateFault"} ], - "documentation":"

Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.

Note that the \"last updated\" column the DMS console only indicates the time that AWS DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.

" + "documentation":"

Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.

Note that the \"last updated\" column in the DMS console only indicates the time that DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.

" }, "ImportCertificate":{ "name":"ImportCertificate", @@ -566,7 +566,7 @@ "errors":[ {"shape":"ResourceNotFoundFault"} ], - "documentation":"

Lists all metadata tags attached to an AWS DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag data type description.

" + "documentation":"

Lists all metadata tags attached to a DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag data type description.

" }, "ModifyEndpoint":{ "name":"ModifyEndpoint", @@ -583,7 +583,7 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"AccessDeniedFault"} ], - "documentation":"

Modifies the specified endpoint.

" + "documentation":"

Modifies the specified endpoint.

For a MySQL source or target endpoint, don't explicitly specify the database using the DatabaseName request parameter on the ModifyEndpoint API call. Specifying DatabaseName when you modify a MySQL endpoint replicates all the task tables to this single database. For MySQL endpoints, you specify the database only when you specify the schema in the table-mapping rules of the DMS task.

" }, "ModifyEventSubscription":{ "name":"ModifyEventSubscription", @@ -604,7 +604,7 @@ {"shape":"KMSNotFoundFault"}, {"shape":"KMSThrottlingFault"} ], - "documentation":"

Modifies an existing AWS DMS event notification subscription.

" + "documentation":"

Modifies an existing DMS event notification subscription.

" }, "ModifyReplicationInstance":{ "name":"ModifyReplicationInstance", @@ -657,7 +657,7 @@ {"shape":"ResourceAlreadyExistsFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Modifies the specified replication task.

You can't modify the task endpoints. The task must be stopped before you can modify it.

For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.

" + "documentation":"

Modifies the specified replication task.

You can't modify the task endpoints. The task must be stopped before you can modify it.

For more information about DMS tasks, see Working with Migration Tasks in the Database Migration Service User Guide.

" }, "MoveReplicationTask":{ "name":"MoveReplicationTask", @@ -673,7 +673,7 @@ {"shape":"ResourceNotFoundFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Moves a replication task from its current replication instance to a different target replication instance using the specified parameters. The target replication instance must be created with the same or later AWS DMS version as the current replication instance.

" + "documentation":"

Moves a replication task from its current replication instance to a different target replication instance using the specified parameters. The target replication instance must be created with the same or later DMS version as the current replication instance.

" }, "RebootReplicationInstance":{ "name":"RebootReplicationInstance", @@ -730,7 +730,7 @@ "errors":[ {"shape":"ResourceNotFoundFault"} ], - "documentation":"

Removes metadata tags from an AWS DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag data type description.

" + "documentation":"

Removes metadata tags from a DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag data type description.

" }, "StartReplicationTask":{ "name":"StartReplicationTask", @@ -745,7 +745,7 @@ {"shape":"InvalidResourceStateFault"}, {"shape":"AccessDeniedFault"} ], - "documentation":"

Starts the replication task.

For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.

" + "documentation":"

Starts the replication task.

For more information about DMS tasks, see Working with Migration Tasks in the Database Migration Service User Guide.

" }, "StartReplicationTaskAssessment":{ "name":"StartReplicationTaskAssessment", @@ -826,7 +826,7 @@ "documentation":"

" } }, - "documentation":"

AWS DMS was denied access to the endpoint. Check that the role is correctly configured.

", + "documentation":"

DMS was denied access to the endpoint. Check that the role is correctly configured.

", "exception":true }, "AccountQuota":{ @@ -834,7 +834,7 @@ "members":{ "AccountQuotaName":{ "shape":"String", - "documentation":"

The name of the AWS DMS quota for this AWS account.

" + "documentation":"

The name of the DMS quota for this account.

" }, "Used":{ "shape":"Long", @@ -845,7 +845,7 @@ "documentation":"

The maximum allowed value for the quota.

" } }, - "documentation":"

Describes a quota for an AWS account, for example, the number of replication instances allowed.

" + "documentation":"

Describes a quota for an account, for example, the number of replication instances allowed.

" }, "AccountQuotaList":{ "type":"list", @@ -860,14 +860,14 @@ "members":{ "ResourceArn":{ "shape":"String", - "documentation":"

Identifies the AWS DMS resource to which tags should be added. The value for this parameter is an Amazon Resource Name (ARN).

For AWS DMS, you can tag a replication instance, an endpoint, or a replication task.

" + "documentation":"

Identifies the DMS resource to which tags should be added. The value for this parameter is an Amazon Resource Name (ARN).

For DMS, you can tag a replication instance, an endpoint, or a replication task.

" }, "Tags":{ "shape":"TagList", "documentation":"

One or more tags to be assigned to the resource.

" } }, - "documentation":"

Associates a set of tags with an AWS DMS resource.

" + "documentation":"

Associates a set of tags with a DMS resource.

" }, "AddTagsToResourceResponse":{ "type":"structure", @@ -885,7 +885,7 @@ "members":{ "ReplicationInstanceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS DMS resource that the pending maintenance action applies to.

" + "documentation":"

The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to.

" }, "ApplyAction":{ "shape":"String", @@ -903,7 +903,7 @@ "members":{ "ResourcePendingMaintenanceActions":{ "shape":"ResourcePendingMaintenanceActions", - "documentation":"

The AWS DMS resource that the pending maintenance action will be applied to.

" + "documentation":"

The DMS resource that the pending maintenance action will be applied to.

" } }, "documentation":"

" @@ -931,7 +931,7 @@ "documentation":"

The name of the Availability Zone.

" } }, - "documentation":"

The name of an Availability Zone for use during database migration. AvailabilityZone is an optional parameter to the CreateReplicationInstance operation, and it’s value relates to the AWS Region of an endpoint. For example, the availability zone of an endpoint in the us-east-1 region might be us-east-1a, us-east-1b, us-east-1c, or us-east-1d.

" + "documentation":"

The name of an Availability Zone for use during database migration. AvailabilityZone is an optional parameter to the CreateReplicationInstance operation, and its value relates to the Region of an endpoint. For example, the availability zone of an endpoint in the us-east-1 region might be us-east-1a, us-east-1b, us-east-1c, or us-east-1d.

" }, "AvailabilityZonesList":{ "type":"list", @@ -1098,15 +1098,15 @@ }, "DatabaseName":{ "shape":"String", - "documentation":"

The name of the endpoint database.

" + "documentation":"

The name of the endpoint database. For a MySQL source or target endpoint, do not specify DatabaseName.

" }, "ExtraConnectionAttributes":{ "shape":"String", - "documentation":"

Additional attributes associated with the connection. Each attribute is specified as a name-value pair associated by an equal sign (=). Multiple attributes are separated by a semicolon (;) with no additional white space. For information on the attributes available for connecting your source or target endpoint, see Working with AWS DMS Endpoints in the AWS Database Migration Service User Guide.

" + "documentation":"

Additional attributes associated with the connection. Each attribute is specified as a name-value pair associated by an equal sign (=). Multiple attributes are separated by a semicolon (;) with no additional white space. For information on the attributes available for connecting your source or target endpoint, see Working with DMS Endpoints in the Database Migration Service User Guide.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

An AWS KMS key identifier that is used to encrypt the connection parameters for the endpoint.

If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" + "documentation":"

An KMS key identifier that is used to encrypt the connection parameters for the endpoint.

If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key.

KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.

" }, "Tags":{ "shape":"TagList", @@ -1122,7 +1122,7 @@ }, "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the service access role that you want to use to create the endpoint.

" + "documentation":"

The Amazon Resource Name (ARN) for the service access role that you want to use to create the endpoint. The role must allow the iam:PassRole action.

" }, "ExternalTableDefinition":{ "shape":"String", @@ -1130,64 +1130,64 @@ }, "DynamoDbSettings":{ "shape":"DynamoDbSettings", - "documentation":"

Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the Database Migration Service User Guide.

" }, "S3Settings":{ "shape":"S3Settings", - "documentation":"

Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for DMS in the Database Migration Service User Guide.

" }, "DmsTransferSettings":{ "shape":"DmsTransferSettings", - "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Possible settings include the following:

  • ServiceAccessRoleArn - The IAM role that has permission to access the Amazon S3 bucket.

  • BucketName - The name of the S3 bucket to use.

  • CompressionType - An optional parameter to use GZIP to compress the target files. To use GZIP, set this value to NONE (the default). To keep the files uncompressed, don't use this value.

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" + "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Possible settings include the following:

  • ServiceAccessRoleArn - The IAM role that has permission to access the Amazon S3 bucket. The role must allow the iam:PassRole action.

  • BucketName - The name of the S3 bucket to use.

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\" }

" }, "MongoDbSettings":{ "shape":"MongoDbSettings", - "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see Endpoint configuration settings when using MongoDB as a source for Database Migration Service in the Database Migration Service User Guide.

" }, "KinesisSettings":{ "shape":"KinesisSettings", - "documentation":"

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using object mapping to migrate data to a Kinesis data stream in the Database Migration Service User Guide.

" }, "KafkaSettings":{ "shape":"KafkaSettings", - "documentation":"

Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using object mapping to migrate data to a Kafka topic in the Database Migration Service User Guide.

" }, "ElasticsearchSettings":{ "shape":"ElasticsearchSettings", - "documentation":"

Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for DMS in the Database Migration Service User Guide.

" }, "NeptuneSettings":{ "shape":"NeptuneSettings", - "documentation":"

Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying graph-mapping rules using Gremlin and R2RML for Amazon Neptune as a target in the Database Migration Service User Guide.

" }, "RedshiftSettings":{"shape":"RedshiftSettings"}, "PostgreSQLSettings":{ "shape":"PostgreSQLSettings", - "documentation":"

Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for AWS DMS and Extra connection attributes when using PostgreSQL as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for DMS and Extra connection attributes when using PostgreSQL as a target for DMS in the Database Migration Service User Guide.

" }, "MySQLSettings":{ "shape":"MySQLSettings", - "documentation":"

Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for AWS DMS and Extra connection attributes when using a MySQL-compatible database as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for DMS and Extra connection attributes when using a MySQL-compatible database as a target for DMS in the Database Migration Service User Guide.

" }, "OracleSettings":{ "shape":"OracleSettings", - "documentation":"

Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for AWS DMS and Extra connection attributes when using Oracle as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for DMS and Extra connection attributes when using Oracle as a target for DMS in the Database Migration Service User Guide.

" }, "SybaseSettings":{ "shape":"SybaseSettings", - "documentation":"

Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for AWS DMS and Extra connection attributes when using SAP ASE as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for DMS and Extra connection attributes when using SAP ASE as a target for DMS in the Database Migration Service User Guide.

" }, "MicrosoftSQLServerSettings":{ "shape":"MicrosoftSQLServerSettings", - "documentation":"

Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for AWS DMS and Extra connection attributes when using SQL Server as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for DMS and Extra connection attributes when using SQL Server as a target for DMS in the Database Migration Service User Guide.

" }, "IBMDb2Settings":{ "shape":"IBMDb2Settings", - "documentation":"

Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for DMS in the Database Migration Service User Guide.

" }, "ResourceIdentifier":{ "shape":"String", - "documentation":"

A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of EndpointArn.

" + "documentation":"

A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, DMS generates a default identifier value for the end of EndpointArn.

" }, "DocDbSettings":{"shape":"DocDbSettings"} }, @@ -1212,7 +1212,7 @@ "members":{ "SubscriptionName":{ "shape":"String", - "documentation":"

The name of the AWS DMS event notification subscription. This name must be less than 255 characters.

" + "documentation":"

The name of the DMS event notification subscription. This name must be less than 255 characters.

" }, "SnsTopicArn":{ "shape":"String", @@ -1220,15 +1220,15 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The type of AWS DMS resource that generates the events. For example, if you want to be notified of events generated by a replication instance, you set this parameter to replication-instance. If this value isn't specified, all events are returned.

Valid values: replication-instance | replication-task

" + "documentation":"

The type of DMS resource that generates the events. For example, if you want to be notified of events generated by a replication instance, you set this parameter to replication-instance. If this value isn't specified, all events are returned.

Valid values: replication-instance | replication-task

" }, "EventCategories":{ "shape":"EventCategoriesList", - "documentation":"

A list of event categories for a source type that you want to subscribe to. For more information, see Working with Events and Notifications in the AWS Database Migration Service User Guide.

" + "documentation":"

A list of event categories for a source type that you want to subscribe to. For more information, see Working with Events and Notifications in the Database Migration Service User Guide.

" }, "SourceIds":{ "shape":"SourceIdsList", - "documentation":"

A list of identifiers for which AWS DMS provides notification events.

If you don't specify a value, notifications are provided for all sources.

If you specify multiple values, they must be of the same type. For example, if you specify a database instance ID, then all of the other values must be database instance IDs.

" + "documentation":"

A list of identifiers for which DMS provides notification events.

If you don't specify a value, notifications are provided for all sources.

If you specify multiple values, they must be of the same type. For example, if you specify a database instance ID, then all of the other values must be database instance IDs.

" }, "Enabled":{ "shape":"BooleanOptional", @@ -1268,7 +1268,7 @@ }, "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", @@ -1276,7 +1276,7 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's AWS Region, for example: us-east-1d

" + "documentation":"

The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's Region, for example: us-east-1d

" }, "ReplicationSubnetGroupIdentifier":{ "shape":"String", @@ -1284,7 +1284,7 @@ }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" }, "MultiAZ":{ "shape":"BooleanOptional", @@ -1304,7 +1304,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

An AWS KMS key identifier that is used to encrypt the data on the replication instance.

If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" + "documentation":"

A KMS key identifier that is used to encrypt the data on the replication instance.

If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key.

KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.

" }, "PubliclyAccessible":{ "shape":"BooleanOptional", @@ -1316,7 +1316,7 @@ }, "ResourceIdentifier":{ "shape":"String", - "documentation":"

A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of EndpointArn.

" + "documentation":"

A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, DMS generates a default identifier value for the end of EndpointArn.

" } }, "documentation":"

" @@ -1401,11 +1401,11 @@ }, "TableMappings":{ "shape":"String", - "documentation":"

The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the AWS Database Migration Service User Guide.

" + "documentation":"

The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the Database Migration Service User Guide.

" }, "ReplicationTaskSettings":{ "shape":"String", - "documentation":"

Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for AWS Database Migration Service Tasks in the AWS Database Migration User Guide.

" + "documentation":"

Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for Database Migration Service Tasks in the Database Migration Service User Guide.

" }, "CdcStartTime":{ "shape":"TStamp", @@ -1413,7 +1413,7 @@ }, "CdcStartPosition":{ "shape":"String", - "documentation":"

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.

" + "documentation":"

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.

" }, "CdcStopPosition":{ "shape":"String", @@ -1425,11 +1425,11 @@ }, "TaskData":{ "shape":"String", - "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

" + "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.

" }, "ResourceIdentifier":{ "shape":"String", - "documentation":"

A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of EndpointArn.

" + "documentation":"

A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, DMS generates a default identifier value for the end of EndpointArn.

" } }, "documentation":"

" @@ -1654,7 +1654,7 @@ }, "UniqueAccountIdentifier":{ "shape":"String", - "documentation":"

A unique AWS DMS identifier for an account in a particular AWS Region. The value of this identifier has the following format: c99999999999. DMS uses this identifier to name artifacts. For example, DMS uses this identifier to name the default Amazon S3 bucket for storing task assessment reports in a given AWS Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier. Here is an example name for this default S3 bucket: dms-111122223333-c44445555666.

AWS DMS supports the UniqueAccountIdentifier parameter in versions 3.1.4 and later.

" + "documentation":"

A unique DMS identifier for an account in a particular Region. The value of this identifier has the following format: c99999999999. DMS uses this identifier to name artifacts. For example, DMS uses this identifier to name the default Amazon S3 bucket for storing task assessment reports in a given Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier. Here is an example name for this default S3 bucket: dms-111122223333-c44445555666.

DMS supports the UniqueAccountIdentifier parameter in versions 3.1.4 and later.

" } }, "documentation":"

" @@ -1698,7 +1698,7 @@ "members":{ "IndividualAssessmentNames":{ "shape":"IndividualAssessmentNameList", - "documentation":"

List of names for the individual assessments supported by the premigration assessment run that you start based on the specified request parameters. For more information on the available individual assessments, including compatibility with different migration task configurations, see Working with premigration assessment runs in the AWS Database Migration Service User Guide.

" + "documentation":"

List of names for the individual assessments supported by the premigration assessment run that you start based on the specified request parameters. For more information on the available individual assessments, including compatibility with different migration task configurations, see Working with premigration assessment runs in the Database Migration Service User Guide.

" }, "Marker":{ "shape":"String", @@ -1869,7 +1869,7 @@ "members":{ "SourceType":{ "shape":"String", - "documentation":"

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-task

" + "documentation":"

The type of DMS resource that generates events.

Valid values: replication-instance | replication-task

" }, "Filters":{ "shape":"FilterList", @@ -1893,7 +1893,7 @@ "members":{ "SubscriptionName":{ "shape":"String", - "documentation":"

The name of the AWS DMS event subscription to be described.

" + "documentation":"

The name of the DMS event subscription to be described.

" }, "Filters":{ "shape":"FilterList", @@ -1933,7 +1933,7 @@ }, "SourceType":{ "shape":"SourceType", - "documentation":"

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-task

" + "documentation":"

The type of DMS resource that generates events.

Valid values: replication-instance | replication-task

" }, "StartTime":{ "shape":"TStamp", @@ -2388,7 +2388,7 @@ "members":{ "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The IAM role that has permission to access the Amazon S3 bucket.

" + "documentation":"

The IAM role that has permission to access the Amazon S3 bucket. When specified as part of request syntax, such as for the CreateEndpoint and ModifyEndpoint actions, the role must allow the iam:PassRole action.

" }, "BucketName":{ "shape":"String", @@ -2434,11 +2434,11 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" + "documentation":"

The KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key. KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.

" }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the DocumentDB endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the DocumentDB endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -2453,10 +2453,10 @@ "members":{ "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) used by the service access IAM role.

" + "documentation":"

The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the iam:PassRole action.

" } }, - "documentation":"

Provides the Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role used to define an Amazon DynamoDB target endpoint.

" + "documentation":"

Provides the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role used to define an Amazon DynamoDB target endpoint.

" }, "ElasticsearchSettings":{ "type":"structure", @@ -2467,11 +2467,11 @@ "members":{ "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) used by service to access the IAM role.

" + "documentation":"

The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the iam:PassRole action.

" }, "EndpointUri":{ "shape":"String", - "documentation":"

The endpoint for the Elasticsearch cluster. AWS DMS uses HTTPS if a transport protocol (http/https) is not specified.

" + "documentation":"

The endpoint for the Elasticsearch cluster. DMS uses HTTPS if a transport protocol (http/https) is not specified.

" }, "FullLoadErrorPercentage":{ "shape":"IntegerOptional", @@ -2544,7 +2544,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

An AWS KMS key identifier that is used to encrypt the connection parameters for the endpoint.

If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" + "documentation":"

A KMS key identifier that is used to encrypt the connection parameters for the endpoint.

If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key.

KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.

" }, "EndpointArn":{ "shape":"String", @@ -2560,7 +2560,7 @@ }, "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) used by the service access IAM role.

" + "documentation":"

The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the iam:PassRole action.

" }, "ExternalTableDefinition":{ "shape":"String", @@ -2580,7 +2580,7 @@ }, "DmsTransferSettings":{ "shape":"DmsTransferSettings", - "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Possible settings include the following:

  • ServiceAccessRoleArn - The IAM role that has permission to access the Amazon S3 bucket.

  • BucketName - The name of the S3 bucket to use.

  • CompressionType - An optional parameter to use GZIP to compress the target files. To use GZIP, set this value to NONE (the default). To keep the files uncompressed, don't use this value.

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" + "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Possible settings include the following:

  • ServiceAccessRoleArn - The IAM role that has permission to access the Amazon S3 bucket. The role must allow the iam:PassRole action.

  • BucketName - The name of the S3 bucket to use.

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\"}

" }, "MongoDbSettings":{ "shape":"MongoDbSettings", @@ -2632,7 +2632,7 @@ }, "DocDbSettings":{"shape":"DocDbSettings"} }, - "documentation":"

Describes an endpoint of a database instance in response to operations such as the following:

  • CreateEndpoint

  • DescribeEndpoint

  • DescribeEndpointTypes

  • ModifyEndpoint

" + "documentation":"

Describes an endpoint of a database instance in response to operations such as the following:

  • CreateEndpoint

  • DescribeEndpoint

  • ModifyEndpoint

" }, "EndpointList":{ "type":"list", @@ -2672,6 +2672,10 @@ "IntValueMax":{ "shape":"IntegerOptional", "documentation":"

The maximum value of an endpoint setting that is of type int.

" + }, + "DefaultValue":{ + "shape":"String", + "documentation":"

The default value of the endpoint setting if no value is specified using CreateEndpoint or ModifyEndpoint.

" } }, "documentation":"

Endpoint settings.

" @@ -2702,7 +2706,7 @@ }, "SourceType":{ "shape":"SourceType", - "documentation":"

The type of AWS DMS resource that generates events.

Valid values: replication-instance | endpoint | replication-task

" + "documentation":"

The type of DMS resource that generates events.

Valid values: replication-instance | endpoint | replication-task

" }, "Message":{ "shape":"String", @@ -2717,7 +2721,7 @@ "documentation":"

The date of the event.

" } }, - "documentation":"

Describes an identifiable significant activity that affects a replication instance or task. This object can provide the message, the available event categories, the date and source of the event, and the AWS DMS resource type.

" + "documentation":"

Describes an identifiable significant activity that affects a replication instance or task. This object can provide the message, the available event categories, the date and source of the event, and the DMS resource type.

" }, "EventCategoriesList":{ "type":"list", @@ -2728,14 +2732,14 @@ "members":{ "SourceType":{ "shape":"String", - "documentation":"

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | replication-task

" + "documentation":"

The type of DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | replication-task

" }, "EventCategories":{ "shape":"EventCategoriesList", "documentation":"

A list of event categories from a source type that you've chosen.

" } }, - "documentation":"

Lists categories of events subscribed to, and generated by, the applicable AWS DMS resource type. This data type appears in response to the DescribeEventCategories action.

" + "documentation":"

Lists categories of events subscribed to, and generated by, the applicable DMS resource type. This data type appears in response to the DescribeEventCategories action.

" }, "EventCategoryGroupList":{ "type":"list", @@ -2750,27 +2754,27 @@ "members":{ "CustomerAwsId":{ "shape":"String", - "documentation":"

The AWS customer account associated with the AWS DMS event notification subscription.

" + "documentation":"

The Amazon Web Services customer account associated with the DMS event notification subscription.

" }, "CustSubscriptionId":{ "shape":"String", - "documentation":"

The AWS DMS event notification subscription Id.

" + "documentation":"

The DMS event notification subscription Id.

" }, "SnsTopicArn":{ "shape":"String", - "documentation":"

The topic ARN of the AWS DMS event notification subscription.

" + "documentation":"

The topic ARN of the DMS event notification subscription.

" }, "Status":{ "shape":"String", - "documentation":"

The status of the AWS DMS event notification subscription.

Constraints:

Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

The status \"no-permission\" indicates that AWS DMS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

" + "documentation":"

The status of the DMS event notification subscription.

Constraints:

Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

The status \"no-permission\" indicates that DMS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

" }, "SubscriptionCreationTime":{ "shape":"String", - "documentation":"

The time the AWS DMS event notification subscription was created.

" + "documentation":"

The time the DMS event notification subscription was created.

" }, "SourceType":{ "shape":"String", - "documentation":"

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | replication-task

" + "documentation":"

The type of DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | replication-task

" }, "SourceIdsList":{ "shape":"SourceIdsList", @@ -2812,7 +2816,7 @@ "documentation":"

The filter value, which can specify one or more values used to narrow the returned results.

" } }, - "documentation":"

Identifies the name and value of a filter object. This filter is used to limit the number and type of AWS DMS objects that are returned for a particular Describe* call or similar operation. Filters are used as an optional parameter for certain API operations.

" + "documentation":"

Identifies the name and value of a filter object. This filter is used to limit the number and type of DMS objects that are returned for a particular Describe* call or similar operation. Filters are used as an optional parameter for certain API operations.

" }, "FilterList":{ "type":"list", @@ -2835,7 +2839,7 @@ }, "Port":{ "shape":"IntegerOptional", - "documentation":"

Endpoint TCP port.

" + "documentation":"

Endpoint TCP port. The default value is 50000.

" }, "ServerName":{ "shape":"String", @@ -2859,7 +2863,7 @@ }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the Db2 LUW endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the Db2 LUW endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -2882,7 +2886,7 @@ }, "CertificateWallet":{ "shape":"CertificateWallet", - "documentation":"

The location of an imported Oracle Wallet certificate for use with SSL.

" + "documentation":"

The location of an imported Oracle Wallet certificate for use with SSL. Provide the name of a .sso file using the fileb:// prefix. You can't provide the certificate inline.

" }, "Tags":{ "shape":"TagList", @@ -2971,7 +2975,7 @@ "members":{ "message":{"shape":"ExceptionMessage"} }, - "documentation":"

An AWS Key Management Service (AWS KMS) error is preventing access to AWS KMS.

", + "documentation":"

A Key Management Service (KMS) error is preventing access to KMS.

", "exception":true }, "KMSInvalidStateFault":{ @@ -2979,7 +2983,7 @@ "members":{ "message":{"shape":"ExceptionMessage"} }, - "documentation":"

The state of the specified AWS KMS resource isn't valid for this request.

", + "documentation":"

The state of the specified KMS resource isn't valid for this request.

", "exception":true }, "KMSKeyNotAccessibleFault":{ @@ -2990,7 +2994,7 @@ "documentation":"

" } }, - "documentation":"

AWS DMS cannot access the AWS KMS key.

", + "documentation":"

DMS cannot access the KMS key.

", "exception":true }, "KMSNotFoundFault":{ @@ -2998,7 +3002,7 @@ "members":{ "message":{"shape":"ExceptionMessage"} }, - "documentation":"

The specified AWS KMS entity or resource can't be found.

", + "documentation":"

The specified KMS entity or resource can't be found.

", "exception":true }, "KMSThrottlingFault":{ @@ -3006,7 +3010,7 @@ "members":{ "message":{"shape":"ExceptionMessage"} }, - "documentation":"

This request triggered AWS KMS request throttling.

", + "documentation":"

This request triggered KMS request throttling.

", "exception":true }, "KafkaSecurityProtocol":{ @@ -3023,11 +3027,11 @@ "members":{ "Broker":{ "shape":"String", - "documentation":"

A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka instance. Specify each broker location in the form broker-hostname-or-ip:port . For example, \"ec2-12-345-678-901.compute-1.amazonaws.com:2345\". For more information and examples of specifying a list of broker locations, see Using Apache Kafka as a target for AWS Database Migration Service in the AWS Data Migration Service User Guide.

" + "documentation":"

A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka instance. Specify each broker location in the form broker-hostname-or-ip:port . For example, \"ec2-12-345-678-901.compute-1.amazonaws.com:2345\". For more information and examples of specifying a list of broker locations, see Using Apache Kafka as a target for Database Migration Service in the Database Migration Service User Guide.

" }, "Topic":{ "shape":"String", - "documentation":"

The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies \"kafka-default-topic\" as the migration topic.

" + "documentation":"

The topic to which you migrate the data. If you don't specify a topic, DMS specifies \"kafka-default-topic\" as the migration topic.

" }, "MessageFormat":{ "shape":"MessageFormatValue", @@ -3039,7 +3043,7 @@ }, "IncludePartitionValue":{ "shape":"BooleanOptional", - "documentation":"

Shows the partition value within the Kafka message output, unless the partition type is schema-table-type. The default is false.

" + "documentation":"

Shows the partition value within the Kafka message output unless the partition type is schema-table-type. The default is false.

" }, "PartitionIncludeSchemaTable":{ "shape":"BooleanOptional", @@ -3079,15 +3083,19 @@ }, "SslCaCertificateArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the private Certification Authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.

" + "documentation":"

The Amazon Resource Name (ARN) for the private Certification Authority (CA) cert that DMS uses to securely connect to your Kafka target endpoint.

" }, "SaslUsername":{ "shape":"String", - "documentation":"

The secure username you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

" + "documentation":"

The secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

" }, "SaslPassword":{ "shape":"SecretString", "documentation":"

The secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

" + }, + "NoHexPrefix":{ + "shape":"BooleanOptional", + "documentation":"

If this attribute is Y, it allows hexadecimal values that don't have the 0x prefix when migrated to a Kafka target. If this attribute is N, all hexadecimal values include this prefix when migrated to Kafka.

" } }, "documentation":"

Provides information that describes an Apache Kafka endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.

" @@ -3109,7 +3117,7 @@ }, "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that AWS DMS uses to write to the Kinesis data stream.

" + "documentation":"

The Amazon Resource Name (ARN) for the IAM role that DMS uses to write to the Kinesis data stream. The role must allow the iam:PassRole action.

" }, "IncludeTransactionDetails":{ "shape":"BooleanOptional", @@ -3134,6 +3142,10 @@ "IncludeNullAndEmpty":{ "shape":"BooleanOptional", "documentation":"

Include NULL and empty columns for records migrated to the endpoint. The default is false.

" + }, + "NoHexPrefix":{ + "shape":"BooleanOptional", + "documentation":"

If this attribute is Y, it allows hexadecimal values that don't have the 0x prefix when migrated to a Kinesis target. If this attribute is N, all hexadecimal values include this prefix when migrated to Kinesis.

" } }, "documentation":"

Provides information that describes an Amazon Kinesis Data Stream endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.

" @@ -3144,7 +3156,7 @@ "members":{ "ResourceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the AWS DMS resource.

" + "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the DMS resource.

" } }, "documentation":"

" @@ -3184,7 +3196,7 @@ }, "ControlTablesFileGroup":{ "shape":"String", - "documentation":"

Specifies a file group for the AWS DMS internal tables. When the replication task starts, all the internal AWS DMS control tables (awsdms_ apply_exception, awsdms_apply, awsdms_changes) are created for the specified file group.

" + "documentation":"

Specifies a file group for the DMS internal tables. When the replication task starts, all the internal DMS control tables (awsdms_apply_exception, awsdms_apply, awsdms_changes) are created for the specified file group.

" }, "Password":{ "shape":"SecretString", @@ -3196,11 +3208,11 @@ }, "ReadBackupOnly":{ "shape":"BooleanOptional", - "documentation":"

When this attribute is set to Y, AWS DMS only reads changes from transaction log backups and doesn't read from the active transaction log file during ongoing replication. Setting this parameter to Y enables you to control active transaction log file growth during full load and ongoing replication tasks. However, it can add some source latency to ongoing replication.

" + "documentation":"

When this attribute is set to Y, DMS only reads changes from transaction log backups and doesn't read from the active transaction log file during ongoing replication. Setting this parameter to Y enables you to control active transaction log file growth during full load and ongoing replication tasks. However, it can add some source latency to ongoing replication.

" }, "SafeguardPolicy":{ "shape":"SafeguardPolicy", - "documentation":"

Use this attribute to minimize the need to access the backup log and enable AWS DMS to prevent truncation using one of the following two methods.

Start transactions in the database: This is the default method. When this method is used, AWS DMS prevents TLOG truncation by mimicking a transaction in the database. As long as such a transaction is open, changes that appear after the transaction started aren't truncated. If you need Microsoft Replication to be enabled in your database, then you must choose this method.

Exclusively use sp_repldone within a single task: When this method is used, AWS DMS reads the changes and then uses sp_repldone to mark the TLOG transactions as ready for truncation. Although this method doesn't involve any transactional activities, it can only be used when Microsoft Replication isn't running. Also, when using this method, only one AWS DMS task can access the database at any given time. Therefore, if you need to run parallel AWS DMS tasks against the same database, use the default method.

" + "documentation":"

Use this attribute to minimize the need to access the backup log and enable DMS to prevent truncation using one of the following two methods.

Start transactions in the database: This is the default method. When this method is used, DMS prevents TLOG truncation by mimicking a transaction in the database. As long as such a transaction is open, changes that appear after the transaction started aren't truncated. If you need Microsoft Replication to be enabled in your database, then you must choose this method.

Exclusively use sp_repldone within a single task: When this method is used, DMS reads the changes and then uses sp_repldone to mark the TLOG transactions as ready for truncation. Although this method doesn't involve any transactional activities, it can only be used when Microsoft Replication isn't running. Also, when using this method, only one DMS task can access the database at any given time. Therefore, if you need to run parallel DMS tasks against the same database, use the default method.

" }, "ServerName":{ "shape":"String", @@ -3220,7 +3232,7 @@ }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the SQL Server endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the SQL Server endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -3275,7 +3287,7 @@ }, "DatabaseName":{ "shape":"String", - "documentation":"

The name of the endpoint database.

" + "documentation":"

The name of the endpoint database. For a MySQL source or target endpoint, do not specify DatabaseName.

" }, "ExtraConnectionAttributes":{ "shape":"String", @@ -3291,7 +3303,7 @@ }, "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the service access role you want to use to modify the endpoint.

" + "documentation":"

The Amazon Resource Name (ARN) for the IAM role you want to use to modify the endpoint. The role must allow the iam:PassRole action.

" }, "ExternalTableDefinition":{ "shape":"String", @@ -3299,64 +3311,68 @@ }, "DynamoDbSettings":{ "shape":"DynamoDbSettings", - "documentation":"

Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the Database Migration Service User Guide.

" }, "S3Settings":{ "shape":"S3Settings", - "documentation":"

Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for DMS in the Database Migration Service User Guide.

" }, "DmsTransferSettings":{ "shape":"DmsTransferSettings", - "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Attributes include the following:

  • serviceAccessRoleArn - The AWS Identity and Access Management (IAM) role that has permission to access the Amazon S3 bucket.

  • BucketName - The name of the S3 bucket to use.

  • compressionType - An optional parameter to use GZIP to compress the target files. Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed.

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" + "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Attributes include the following:

  • serviceAccessRoleArn - The Identity and Access Management (IAM) role that has permission to access the Amazon S3 bucket. The role must allow the iam:PassRole action.

  • BucketName - The name of the S3 bucket to use.

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string ,BucketName=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\"}

" }, "MongoDbSettings":{ "shape":"MongoDbSettings", - "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Endpoint configuration settings when using MongoDB as a source for Database Migration Service in the Database Migration Service User Guide.

" }, "KinesisSettings":{ "shape":"KinesisSettings", - "documentation":"

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using object mapping to migrate data to a Kinesis data stream in the Database Migration Service User Guide.

" }, "KafkaSettings":{ "shape":"KafkaSettings", - "documentation":"

Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using object mapping to migrate data to a Kafka topic in the Database Migration Service User Guide.

" }, "ElasticsearchSettings":{ "shape":"ElasticsearchSettings", - "documentation":"

Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for DMS in the Database Migration Service User Guide.

" }, "NeptuneSettings":{ "shape":"NeptuneSettings", - "documentation":"

Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying graph-mapping rules using Gremlin and R2RML for Amazon Neptune as a target in the Database Migration Service User Guide.

" }, "RedshiftSettings":{"shape":"RedshiftSettings"}, "PostgreSQLSettings":{ "shape":"PostgreSQLSettings", - "documentation":"

Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for AWS DMS and Extra connection attributes when using PostgreSQL as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for DMS and Extra connection attributes when using PostgreSQL as a target for DMS in the Database Migration Service User Guide.

" }, "MySQLSettings":{ "shape":"MySQLSettings", - "documentation":"

Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for AWS DMS and Extra connection attributes when using a MySQL-compatible database as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for DMS and Extra connection attributes when using a MySQL-compatible database as a target for DMS in the Database Migration Service User Guide.

" }, "OracleSettings":{ "shape":"OracleSettings", - "documentation":"

Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for AWS DMS and Extra connection attributes when using Oracle as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for DMS and Extra connection attributes when using Oracle as a target for DMS in the Database Migration Service User Guide.

" }, "SybaseSettings":{ "shape":"SybaseSettings", - "documentation":"

Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for AWS DMS and Extra connection attributes when using SAP ASE as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for DMS and Extra connection attributes when using SAP ASE as a target for DMS in the Database Migration Service User Guide.

" }, "MicrosoftSQLServerSettings":{ "shape":"MicrosoftSQLServerSettings", - "documentation":"

Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for AWS DMS and Extra connection attributes when using SQL Server as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for DMS and Extra connection attributes when using SQL Server as a target for DMS in the Database Migration Service User Guide.

" }, "IBMDb2Settings":{ "shape":"IBMDb2Settings", - "documentation":"

Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for DMS in the Database Migration Service User Guide.

" }, "DocDbSettings":{ "shape":"DocDbSettings", - "documentation":"

Settings in JSON format for the source DocumentDB endpoint. For more information about the available settings, see the configuration properties section in Using DocumentDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source DocumentDB endpoint. For more information about the available settings, see the configuration properties section in Using DocumentDB as a Target for Database Migration Service in the Database Migration Service User Guide.

" + }, + "ExactSettings":{ + "shape":"BooleanOptional", + "documentation":"

If this attribute is Y, the current call to ModifyEndpoint replaces all existing endpoint settings with the exact settings that you specify in this call. If this attribute is N, the current call to ModifyEndpoint does two things:

  • It replaces any endpoint settings that already exist with new values, for settings with the same names.

  • It creates new endpoint settings that you specify in the call, for settings with different names.

For example, if you call create-endpoint ... --endpoint-settings '{\"a\":1}' ..., the endpoint has the following endpoint settings: '{\"a\":1}'. If you then call modify-endpoint ... --endpoint-settings '{\"b\":2}' ... for the same endpoint, the endpoint has the following settings: '{\"a\":1,\"b\":2}'.

However, suppose that you follow this with a call to modify-endpoint ... --endpoint-settings '{\"b\":2}' --exact-settings ... for that same endpoint again. Then the endpoint has the following settings: '{\"b\":2}'. All existing settings are replaced with the exact settings that you specify.

" } }, "documentation":"

" @@ -3377,7 +3393,7 @@ "members":{ "SubscriptionName":{ "shape":"String", - "documentation":"

The name of the AWS DMS event notification subscription to be modified.

" + "documentation":"

The name of the DMS event notification subscription to be modified.

" }, "SnsTopicArn":{ "shape":"String", @@ -3385,7 +3401,7 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The type of AWS DMS resource that generates the events you want to subscribe to.

Valid values: replication-instance | replication-task

" + "documentation":"

The type of DMS resource that generates the events you want to subscribe to.

Valid values: replication-instance | replication-task

" }, "EventCategories":{ "shape":"EventCategoriesList", @@ -3426,7 +3442,7 @@ }, "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", @@ -3450,7 +3466,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case described following. The change is asynchronously applied as soon as possible.

An outage does result if these factors apply:

  • This parameter is set to true during the maintenance window.

  • A newer minor version is available.

  • AWS DMS has enabled automatic patching for the given engine version.

" + "documentation":"

A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case described following. The change is asynchronously applied as soon as possible.

An outage does result if these factors apply:

  • This parameter is set to true during the maintenance window.

  • A newer minor version is available.

  • DMS has enabled automatic patching for the given engine version.

" }, "ReplicationInstanceIdentifier":{ "shape":"String", @@ -3519,7 +3535,7 @@ }, "TableMappings":{ "shape":"String", - "documentation":"

When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://. For example, --table-mappings file://mappingfile.json. When working with the DMS API, provide the JSON as the parameter value.

" + "documentation":"

When using the CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://. For example, --table-mappings file://mappingfile.json. When working with the DMS API, provide the JSON as the parameter value.

" }, "ReplicationTaskSettings":{ "shape":"String", @@ -3531,7 +3547,7 @@ }, "CdcStartPosition":{ "shape":"String", - "documentation":"

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.

" + "documentation":"

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.

" }, "CdcStopPosition":{ "shape":"String", @@ -3539,7 +3555,7 @@ }, "TaskData":{ "shape":"String", - "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

" + "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.

" } }, "documentation":"

" @@ -3603,11 +3619,11 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" + "documentation":"

The KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key. KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.

" }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the MongoDB endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the MongoDB endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -3649,7 +3665,7 @@ "members":{ "AfterConnectScript":{ "shape":"String", - "documentation":"

Specifies a script to run immediately after AWS DMS connects to the endpoint. The migration task continues running regardless if the SQL statement succeeds or fails.

" + "documentation":"

Specifies a script to run immediately after DMS connects to the endpoint. The migration task continues running regardless of whether the SQL statement succeeds or fails.

For this parameter, provide the code of the script itself, not the name of a file containing the script.

" }, "CleanSourceMetadataOnMismatch":{ "shape":"BooleanOptional", @@ -3657,11 +3673,11 @@ }, "DatabaseName":{ "shape":"String", - "documentation":"

Database name for the endpoint.

" + "documentation":"

Database name for the endpoint. For a MySQL source or target endpoint, don't explicitly specify the database using the DatabaseName request parameter on either the CreateEndpoint or ModifyEndpoint API call. Specifying DatabaseName when you create or modify a MySQL endpoint replicates all the task tables to this single database. For MySQL endpoints, you specify the database only when you specify the schema in the table-mapping rules of the DMS task.

" }, "EventsPollInterval":{ "shape":"IntegerOptional", - "documentation":"

Specifies how often to check the binary log for new changes/events when the database is idle.

Example: eventsPollInterval=5;

In the example, AWS DMS checks for changes in the binary logs every five seconds.

" + "documentation":"

Specifies how often to check the binary log for new changes/events when the database is idle.

Example: eventsPollInterval=5;

In the example, DMS checks for changes in the binary logs every five seconds.

" }, "TargetDbType":{ "shape":"TargetDbType", @@ -3697,7 +3713,7 @@ }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the MySQL endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the MySQL endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -3715,31 +3731,31 @@ "members":{ "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the service role that you created for the Neptune target endpoint. For more information, see Creating an IAM Service Role for Accessing Amazon Neptune as a Target in the AWS Database Migration Service User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the service role that you created for the Neptune target endpoint. The role must allow the iam:PassRole action. For more information, see Creating an IAM Service Role for Accessing Amazon Neptune as a Target in the Database Migration Service User Guide.

" }, "S3BucketName":{ "shape":"String", - "documentation":"

The name of the Amazon S3 bucket where AWS DMS can temporarily store migrated graph data in .csv files before bulk-loading it to the Neptune target database. AWS DMS maps the SQL source data to graph data before storing it in these .csv files.

" + "documentation":"

The name of the Amazon S3 bucket where DMS can temporarily store migrated graph data in .csv files before bulk-loading it to the Neptune target database. DMS maps the SQL source data to graph data before storing it in these .csv files.

" }, "S3BucketFolder":{ "shape":"String", - "documentation":"

A folder path where you want AWS DMS to store migrated graph data in the S3 bucket specified by S3BucketName

" + "documentation":"

A folder path where you want DMS to store migrated graph data in the S3 bucket specified by S3BucketName

" }, "ErrorRetryDuration":{ "shape":"IntegerOptional", - "documentation":"

The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 250.

" + "documentation":"

The number of milliseconds for DMS to wait to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 250.

" }, "MaxFileSize":{ "shape":"IntegerOptional", - "documentation":"

The maximum size in kilobytes of migrated graph data stored in a .csv file before AWS DMS bulk-loads the data to the Neptune target database. The default is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, ready to store the next batch of migrated graph data.

" + "documentation":"

The maximum size in kilobytes of migrated graph data stored in a .csv file before DMS bulk-loads the data to the Neptune target database. The default is 1,048,576 KB. If the bulk load is successful, DMS clears the bucket, ready to store the next batch of migrated graph data.

" }, "MaxRetryCount":{ "shape":"IntegerOptional", - "documentation":"

The number of times for AWS DMS to retry a bulk load of migrated graph data to the Neptune target database before raising an error. The default is 5.

" + "documentation":"

The number of times for DMS to retry a bulk load of migrated graph data to the Neptune target database before raising an error. The default is 5.

" }, "IamAuthEnabled":{ "shape":"BooleanOptional", - "documentation":"

If you want AWS Identity and Access Management (IAM) authorization enabled for this endpoint, set this parameter to true. Then attach the appropriate IAM policy document to your service role specified by ServiceAccessRoleArn. The default is false.

" + "documentation":"

If you want Identity and Access Management (IAM) authorization enabled for this endpoint, set this parameter to true. Then attach the appropriate IAM policy document to your service role specified by ServiceAccessRoleArn. The default is false.

" } }, "documentation":"

Provides information that defines an Amazon Neptune endpoint.

" @@ -3760,11 +3776,11 @@ }, "ArchivedLogDestId":{ "shape":"IntegerOptional", - "documentation":"

Specifies the destination of the archived redo logs. The value should be the same as the DEST_ID number in the v$archived_log table. When working with multiple log destinations (DEST_ID), we recommend that you to specify an archived redo logs location identifier. Doing this improves performance by ensuring that the correct logs are accessed from the outset.

" + "documentation":"

Specifies the ID of the destination for the archived redo logs. This value should be the same as a number in the dest_id column of the v$archived_log view. If you work with an additional redo log destination, use the AdditionalArchivedLogDestId option to specify the additional destination ID. Doing this improves performance by ensuring that the correct logs are accessed from the outset.

" }, "AdditionalArchivedLogDestId":{ "shape":"IntegerOptional", - "documentation":"

Set this attribute with archivedLogDestId in a primary/ standby setup. This attribute is useful in the case of a switchover. In this case, AWS DMS needs to know which destination to get archive redo logs from to read changes. This need arises because the previous primary instance is now a standby instance after switchover.

" + "documentation":"

Set this attribute with ArchivedLogDestId in a primary/ standby setup. This attribute is useful in the case of a switchover. In this case, DMS needs to know which destination to get archive redo logs from to read changes. This need arises because the previous primary instance is now a standby instance after switchover.

Although DMS supports the use of the Oracle RESETLOGS option to open the database, never use RESETLOGS unless necessary. For additional information about RESETLOGS, see RMAN Data Repair Concepts in the Oracle Database Backup and Recovery User's Guide.

" }, "AllowSelectNestedTables":{ "shape":"BooleanOptional", @@ -3772,11 +3788,11 @@ }, "ParallelAsmReadThreads":{ "shape":"IntegerOptional", - "documentation":"

Set this attribute to change the number of threads that DMS configures to perform a Change Data Capture (CDC) load using Oracle Automatic Storage Management (ASM). You can specify an integer value between 2 (the default) and 8 (the maximum). Use this attribute together with the readAheadBlocks attribute.

" + "documentation":"

Set this attribute to change the number of threads that DMS configures to perform a change data capture (CDC) load using Oracle Automatic Storage Management (ASM). You can specify an integer value between 2 (the default) and 8 (the maximum). Use this attribute together with the readAheadBlocks attribute.

" }, "ReadAheadBlocks":{ "shape":"IntegerOptional", - "documentation":"

Set this attribute to change the number of read-ahead blocks that DMS configures to perform a Change Data Capture (CDC) load using Oracle Automatic Storage Management (ASM). You can specify an integer value between 1000 (the default) and 200,000 (the maximum).

" + "documentation":"

Set this attribute to change the number of read-ahead blocks that DMS configures to perform a change data capture (CDC) load using Oracle Automatic Storage Management (ASM). You can specify an integer value between 1000 (the default) and 200,000 (the maximum).

" }, "AccessAlternateDirectly":{ "shape":"BooleanOptional", @@ -3808,7 +3824,7 @@ }, "ArchivedLogsOnly":{ "shape":"BooleanOptional", - "documentation":"

When this field is set to Y, AWS DMS only accesses the archived redo logs. If the archived redo logs are stored on Oracle ASM only, the AWS DMS user account needs to be granted ASM privileges.

" + "documentation":"

When this field is set to Y, DMS only accesses the archived redo logs. If the archived redo logs are stored on Oracle ASM only, the DMS user account needs to be granted ASM privileges.

" }, "AsmPassword":{ "shape":"SecretString", @@ -3832,7 +3848,7 @@ }, "DirectPathParallelLoad":{ "shape":"BooleanOptional", - "documentation":"

When set to true, this attribute specifies a parallel load when useDirectPathFullLoad is set to Y. This attribute also only applies when you use the AWS DMS parallel load feature. Note that the target table cannot have any constraints or indexes.

" + "documentation":"

When set to true, this attribute specifies a parallel load when useDirectPathFullLoad is set to Y. This attribute also only applies when you use the DMS parallel load feature. Note that the target table cannot have any constraints or indexes.

" }, "FailTasksOnLobTruncation":{ "shape":"BooleanOptional", @@ -3860,11 +3876,11 @@ }, "SecurityDbEncryption":{ "shape":"SecretString", - "documentation":"

For an Oracle source endpoint, the transparent data encryption (TDE) password required by AWM DMS to access Oracle redo logs encrypted by TDE using Binary Reader. It is also the TDE_Password part of the comma-separated value you set to the Password request parameter when you create the endpoint. The SecurityDbEncryptian setting is related to this SecurityDbEncryptionName setting. For more information, see Supported encryption methods for using Oracle as a source for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

For an Oracle source endpoint, the transparent data encryption (TDE) password required by DMS to access Oracle redo logs encrypted by TDE using Binary Reader. It is also the TDE_Password part of the comma-separated value you set to the Password request parameter when you create the endpoint. The SecurityDbEncryption setting is related to this SecurityDbEncryptionName setting. For more information, see Supported encryption methods for using Oracle as a source for DMS in the Database Migration Service User Guide.

" }, "SecurityDbEncryptionName":{ "shape":"String", - "documentation":"

For an Oracle source endpoint, the name of a key used for the transparent data encryption (TDE) of the columns and tablespaces in an Oracle source database that is encrypted using TDE. The key value is the value of the SecurityDbEncryption setting. For more information on setting the key name value of SecurityDbEncryptionName, see the information and example for setting the securityDbEncryptionName extra connection attribute in Supported encryption methods for using Oracle as a source for AWS DMS in the AWS Database Migration Service User Guide.

" + "documentation":"

For an Oracle source endpoint, the name of a key used for the transparent data encryption (TDE) of the columns and tablespaces in an Oracle source database that is encrypted using TDE. The key value is the value of the SecurityDbEncryption setting. For more information on setting the key name value of SecurityDbEncryptionName, see the information and example for setting the securityDbEncryptionName extra connection attribute in Supported encryption methods for using Oracle as a source for DMS in the Database Migration Service User Guide.

" }, "ServerName":{ "shape":"String", @@ -3874,13 +3890,29 @@ "shape":"String", "documentation":"

Use this attribute to convert SDO_GEOMETRY to GEOJSON format. By default, DMS calls the SDO2GEOJSON custom function if present and accessible. Or you can create your own custom function that mimics the operation of SDOGEOJSON and set SpatialDataOptionToGeoJsonFunctionName to call it instead.

" }, + "StandbyDelayTime":{ + "shape":"IntegerOptional", + "documentation":"

Use this attribute to specify a time in minutes for the delay in standby sync. If the source is an Oracle Active Data Guard standby database, use this attribute to specify the time lag between primary and standby databases.

In DMS, you can create an Oracle CDC task that uses an Active Data Guard standby instance as a source for replicating ongoing changes. Doing this eliminates the need to connect to an active database that might be in production.

" + }, "Username":{ "shape":"String", "documentation":"

Endpoint connection user name.

" }, + "UseBFile":{ + "shape":"BooleanOptional", + "documentation":"

Set this attribute to Y to capture change data using the Binary Reader utility. Set UseLogminerReader to N to set this attribute to Y. To use Binary Reader with Amazon RDS for Oracle as the source, you set additional attributes. For more information about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC.

" + }, + "UseDirectPathFullLoad":{ + "shape":"BooleanOptional", + "documentation":"

Set this attribute to Y to have DMS use a direct path full load. Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). By using this OCI protocol, you can bulk-load Oracle target tables during a full load.

" + }, + "UseLogminerReader":{ + "shape":"BooleanOptional", + "documentation":"

Set this attribute to Y to capture change data using the Oracle LogMiner utility (the default). Set this attribute to N if you want to access the redo logs as a binary file. When you set UseLogminerReader to N, also set UseBFile to Y. For more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in the DMS User Guide.

" + }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the Oracle endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the Oracle endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -3888,7 +3920,7 @@ }, "SecretsManagerOracleAsmAccessRoleArn":{ "shape":"String", - "documentation":"

Required only if your Oracle endpoint uses Advanced Storage Manager (ASM). The full ARN of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the SecretsManagerOracleAsmSecret. This SecretsManagerOracleAsmSecret has the secret value that allows access to the Oracle ASM of the endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerOracleAsmSecretId. Or you can specify clear-text values for AsmUserName, AsmPassword, and AsmServerName. You can't specify both. For more information on creating this SecretsManagerOracleAsmSecret and the SecretsManagerOracleAsmAccessRoleArn and SecretsManagerOracleAsmSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

Required only if your Oracle endpoint uses Advanced Storage Manager (ASM). The full ARN of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the SecretsManagerOracleAsmSecret. This SecretsManagerOracleAsmSecret has the secret value that allows access to the Oracle ASM of the endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerOracleAsmSecretId. Or you can specify clear-text values for AsmUserName, AsmPassword, and AsmServerName. You can't specify both. For more information on creating this SecretsManagerOracleAsmSecret and the SecretsManagerOracleAsmAccessRoleArn and SecretsManagerOracleAsmSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerOracleAsmSecretId":{ "shape":"String", @@ -3906,7 +3938,7 @@ }, "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "StorageType":{ "shape":"String", @@ -3934,7 +3966,7 @@ }, "ReleaseStatus":{ "shape":"ReleaseStatusValues", - "documentation":"

The value returned when the specified EngineVersion of the replication instance is in Beta or test mode. This indicates some features might not work as expected.

AWS DMS supports the ReleaseStatus parameter in versions 3.1.4 and later.

" + "documentation":"

The value returned when the specified EngineVersion of the replication instance is in Beta or test mode. This indicates some features might not work as expected.

DMS supports the ReleaseStatus parameter in versions 3.1.4 and later.

" } }, "documentation":"

In response to the DescribeOrderableReplicationInstances operation, this object describes an available replication instance. This description includes the replication instance's type, engine version, and allocated storage.

" @@ -3978,7 +4010,7 @@ "documentation":"

A description providing more detail about the maintenance action.

" } }, - "documentation":"

Describes a maintenance action pending for an AWS DMS resource, including when and how it will be applied. This data type is a response element to the DescribePendingMaintenanceActions operation.

" + "documentation":"

Describes a maintenance action pending for a DMS resource, including when and how it will be applied. This data type is a response element to the DescribePendingMaintenanceActions operation.

" }, "PendingMaintenanceActionDetails":{ "type":"list", @@ -3988,16 +4020,24 @@ "type":"list", "member":{"shape":"ResourcePendingMaintenanceActions"} }, + "PluginNameValue":{ + "type":"string", + "enum":[ + "no-preference", + "test-decoding", + "pglogical" + ] + }, "PostgreSQLSettings":{ "type":"structure", "members":{ "AfterConnectScript":{ "shape":"String", - "documentation":"

For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.

Example: afterConnectScript=SET session_replication_role='replica'

" + "documentation":"

For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.

Example: afterConnectScript=SET session_replication_role='replica'

" }, "CaptureDdls":{ "shape":"BooleanOptional", - "documentation":"

To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.

If this value is set to N, you don't have to create tables or triggers on the source database.

" + "documentation":"

To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.

If this value is set to N, you don't have to create tables or triggers on the source database.

" }, "MaxFileSize":{ "shape":"IntegerOptional", @@ -4019,6 +4059,18 @@ "shape":"BooleanOptional", "documentation":"

When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize.

If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.

" }, + "HeartbeatEnable":{ + "shape":"BooleanOptional", + "documentation":"

If this attribute is set to true, the write-ahead log (WAL) heartbeat keeps restart_lsn moving and prevents storage full scenarios. The WAL heartbeat mimics a dummy transaction, so that idle logical replication slots don't hold onto old WAL logs that result in storage full situations on the source.

" + }, + "HeartbeatSchema":{ + "shape":"String", + "documentation":"

Sets the schema in which the heartbeat artifacts are created.

" + }, + "HeartbeatFrequency":{ + "shape":"IntegerOptional", + "documentation":"

Sets the WAL heartbeat frequency (in minutes).

" + }, "Password":{ "shape":"SecretString", "documentation":"

Endpoint connection password.

" @@ -4037,11 +4089,15 @@ }, "SlotName":{ "shape":"String", - "documentation":"

Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.

When used with the AWS DMS API CdcStartPosition request parameter, this attribute also enables using native CDC start points.

" + "documentation":"

Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.

When used with the DMS API CdcStartPosition request parameter, this attribute also enables using native CDC start points.

" + }, + "PluginName":{ + "shape":"PluginNameValue", + "documentation":"

Specifies the plugin to use to create a replication slot.

" }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the PostgreSQL endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the PostgreSQL endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -4086,7 +4142,7 @@ }, "BucketFolder":{ "shape":"String", - "documentation":"

An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift cluster.

For full load mode, AWS DMS converts source records into .csv files and loads them to the BucketFolder/TableID path. AWS DMS uses the Redshift COPY command to upload the .csv files to the target table. The files are deleted once the COPY operation has finished. For more information, see COPY in the Amazon Redshift Database Developer Guide.

For change-data-capture (CDC) mode, AWS DMS creates a NetChanges table, and loads the .csv files to this BucketFolder/NetChangesTableID path.

" + "documentation":"

An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift cluster.

For full load mode, DMS converts source records into .csv files and loads them to the BucketFolder/TableID path. DMS uses the Redshift COPY command to upload the .csv files to the target table. The files are deleted once the COPY operation has finished. For more information, see COPY in the Amazon Redshift Database Developer Guide.

For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to this BucketFolder/NetChangesTableID path.

" }, "BucketName":{ "shape":"String", @@ -4114,11 +4170,11 @@ }, "EmptyAsNull":{ "shape":"BooleanOptional", - "documentation":"

A value that specifies whether AWS DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of true sets empty CHAR and VARCHAR fields to null. The default is false.

" + "documentation":"

A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of true sets empty CHAR and VARCHAR fields to null. The default is false.

" }, "EncryptionMode":{ "shape":"EncryptionModeValue", - "documentation":"

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, create an AWS Identity and Access Management (IAM) role with a policy that allows \"arn:aws:s3:::*\" to use the following actions: \"s3:PutObject\", \"s3:ListBucket\"

" + "documentation":"

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, create an Identity and Access Management (IAM) role with a policy that allows \"arn:aws:s3:::*\" to use the following actions: \"s3:PutObject\", \"s3:ListBucket\"

" }, "ExplicitIds":{ "shape":"BooleanOptional", @@ -4130,7 +4186,7 @@ }, "LoadTimeout":{ "shape":"IntegerOptional", - "documentation":"

The amount of time to wait (in milliseconds) before timing out of operations performed by AWS DMS on a Redshift cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.

" + "documentation":"

The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a Redshift cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.

" }, "MaxFileSize":{ "shape":"IntegerOptional", @@ -4162,11 +4218,11 @@ }, "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon Redshift service.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon Redshift service. The role must allow the iam:PassRole action.

" }, "ServerSideEncryptionKmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key ID. If you are using SSE_KMS for the EncryptionMode, provide this key ID. The key that you use needs an attached policy that enables IAM user permissions and allows use of the key.

" + "documentation":"

The KMS key ID. If you are using SSE_KMS for the EncryptionMode, provide this key ID. The key that you use needs an attached policy that enables IAM user permissions and allows use of the key.

" }, "TimeFormat":{ "shape":"String", @@ -4190,7 +4246,7 @@ }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the Amazon Redshift endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the Amazon Redshift endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -4311,14 +4367,14 @@ "members":{ "ResourceArn":{ "shape":"String", - "documentation":"

An AWS DMS resource from which you want to remove tag(s). The value for this parameter is an Amazon Resource Name (ARN).

" + "documentation":"

A DMS resource from which you want to remove tag(s). The value for this parameter is an Amazon Resource Name (ARN).

" }, "TagKeys":{ "shape":"KeyList", "documentation":"

The tag key (name) of the tag to be removed.

" } }, - "documentation":"

Removes one or more tags from an AWS DMS resource.

" + "documentation":"

Removes one or more tags from a DMS resource.

" }, "RemoveTagsFromResourceResponse":{ "type":"structure", @@ -4342,7 +4398,7 @@ }, "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. It is a required parameter, although a defualt value is pre-selected in the DMS console.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. It is a required parameter, although a default value is pre-selected in the DMS console.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "ReplicationInstanceStatus":{ "shape":"String", @@ -4390,7 +4446,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

An AWS KMS key identifier that is used to encrypt the data on the replication instance.

If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" + "documentation":"

A KMS key identifier that is used to encrypt the data on the replication instance.

If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key.

KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.

" }, "ReplicationInstanceArn":{ "shape":"String", @@ -4472,7 +4528,7 @@ "members":{ "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "AllocatedStorage":{ "shape":"IntegerOptional", @@ -4603,7 +4659,7 @@ }, "TaskData":{ "shape":"String", - "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

" + "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.

" }, "TargetReplicationInstanceArn":{ "shape":"String", @@ -4637,11 +4693,11 @@ }, "AssessmentResults":{ "shape":"String", - "documentation":"

The task assessment results in JSON format.

" + "documentation":"

The task assessment results in JSON format.

The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.

" }, "S3ObjectUrl":{ "shape":"String", - "documentation":"

The URL of the S3 object containing the task assessment results.

" + "documentation":"

The URL of the S3 object containing the task assessment results.

The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.

" } }, "documentation":"

The task assessment report in JSON format.

" @@ -4679,15 +4735,15 @@ }, "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun operation.

" + "documentation":"

ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun operation. The role must allow the iam:PassRole action.

" }, "ResultLocationBucket":{ "shape":"String", - "documentation":"

Amazon S3 bucket where AWS DMS stores the results of this assessment run.

" + "documentation":"

Amazon S3 bucket where DMS stores the results of this assessment run.

" }, "ResultLocationFolder":{ "shape":"String", - "documentation":"

Folder in an Amazon S3 bucket where AWS DMS stores the results of this assessment run.

" + "documentation":"

Folder in an Amazon S3 bucket where DMS stores the results of this assessment run.

" }, "ResultEncryptionMode":{ "shape":"String", @@ -4695,7 +4751,7 @@ }, "ResultKmsKeyArn":{ "shape":"String", - "documentation":"

ARN of the AWS KMS encryption key used to encrypt the assessment run results.

" + "documentation":"

ARN of the KMS encryption key used to encrypt the assessment run results.

" }, "AssessmentRunName":{ "shape":"String", @@ -4835,14 +4891,14 @@ "members":{ "ResourceIdentifier":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) for AWS DMS in the DMS documentation.

" + "documentation":"

The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) for DMS in the DMS documentation.

" }, "PendingMaintenanceActionDetails":{ "shape":"PendingMaintenanceActionDetails", "documentation":"

Detailed information about the pending maintenance action.

" } }, - "documentation":"

Identifies an AWS DMS resource and any pending actions for it.

" + "documentation":"

Identifies a DMS resource and any pending actions for it.

" }, "ResourceQuotaExceededFault":{ "type":"structure", @@ -4876,7 +4932,7 @@ "members":{ "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

" + "documentation":"

The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the iam:PassRole action. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

" }, "ExternalTableDefinition":{ "shape":"String", @@ -4904,11 +4960,11 @@ }, "EncryptionMode":{ "shape":"EncryptionModeValue", - "documentation":"

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow \"arn:aws:s3:::dms-*\" to use the following actions:

  • s3:CreateBucket

  • s3:ListBucket

  • s3:DeleteBucket

  • s3:GetBucketLocation

  • s3:GetObject

  • s3:PutObject

  • s3:DeleteObject

  • s3:GetObjectVersion

  • s3:GetBucketPolicy

  • s3:PutBucketPolicy

  • s3:DeleteBucketPolicy

" + "documentation":"

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, you need an Identity and Access Management (IAM) role with permission to allow \"arn:aws:s3:::dms-*\" to use the following actions:

  • s3:CreateBucket

  • s3:ListBucket

  • s3:DeleteBucket

  • s3:GetBucketLocation

  • s3:GetObject

  • s3:PutObject

  • s3:DeleteObject

  • s3:GetObjectVersion

  • s3:GetBucketPolicy

  • s3:PutBucketPolicy

  • s3:DeleteBucketPolicy

" }, "ServerSideEncryptionKmsKeyId":{ "shape":"String", - "documentation":"

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

" + "documentation":"

If you are using SSE_KMS for the EncryptionMode, provide the KMS key ID. The key that you use needs an attached policy that enables Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

" }, "DataFormat":{ "shape":"DataFormatValue", @@ -4940,27 +4996,27 @@ }, "IncludeOpForFullLoad":{ "shape":"BooleanOptional", - "documentation":"

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide..

" + "documentation":"

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.

" }, "CdcInsertsOnly":{ "shape":"BooleanOptional", - "documentation":"

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide..

AWS DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions 3.1.4 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

" + "documentation":"

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.

DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions 3.1.4 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

" }, "TimestampColumnName":{ "shape":"String", - "documentation":"

A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

" + "documentation":"

A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

" }, "ParquetTimestampInMillisecond":{ "shape":"BooleanOptional", - "documentation":"

A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file in .parquet format.

AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.

When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.

Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data with Athena or AWS Glue.

AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.

Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is inserted by setting the TimestampColumnName parameter.

" + "documentation":"

A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file in .parquet format.

DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.

When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.

Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data with Athena or Glue.

DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.

Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is inserted by setting the TimestampColumnName parameter.

" }, "CdcInsertsAndUpdates":{ "shape":"BooleanOptional", - "documentation":"

A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.

For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide..

AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

" + "documentation":"

A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.

For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.

DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

" }, "DatePartitionEnabled":{ "shape":"BooleanOptional", - "documentation":"

When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value is false. For more information about date-based folder partitoning, see Using date-based folder partitioning.

" + "documentation":"

When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value is false. For more information about date-based folder partitioning, see Using date-based folder partitioning.

" }, "DatePartitionSequence":{ "shape":"DatePartitionSequenceValue", @@ -4972,19 +5028,19 @@ }, "UseCsvNoSupValue":{ "shape":"BooleanOptional", - "documentation":"

This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. If set to true for columns not included in the supplemental log, AWS DMS uses the value specified by CsvNoSupValue . If not set or set to false, AWS DMS uses the null value for these columns.

This setting is supported in AWS DMS versions 3.4.1 and later.

" + "documentation":"

This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. If set to true for columns not included in the supplemental log, DMS uses the value specified by CsvNoSupValue . If not set or set to false, DMS uses the null value for these columns.

This setting is supported in DMS versions 3.4.1 and later.

" }, "CsvNoSupValue":{ "shape":"String", - "documentation":"

This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want AWS DMS to use for all columns not included in the supplemental log. If you do not specify a string value, AWS DMS uses the null value for these columns regardless of the UseCsvNoSupValue setting.

This setting is supported in AWS DMS versions 3.4.1 and later.

" + "documentation":"

This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want DMS to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of the UseCsvNoSupValue setting.

This setting is supported in DMS versions 3.4.1 and later.

" }, "PreserveTransactions":{ "shape":"BooleanOptional", - "documentation":"

If set to true, AWS DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target specified by CdcPath . For more information, see Capturing data changes (CDC) including transaction order on the S3 target.

This setting is supported in AWS DMS versions 3.4.2 and later.

" + "documentation":"

If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target specified by CdcPath . For more information, see Capturing data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

" }, "CdcPath":{ "shape":"String", - "documentation":"

Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If CdcPath is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. For an S3 target if you set PreserveTransactions to true, AWS DMS verifies that you have set this parameter to a folder path on your S3 target where AWS DMS can save the transaction order for the CDC load. AWS DMS creates this CDC folder path in either your S3 target working directory or the S3 target location specified by BucketFolder and BucketName .

For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not specify BucketFolder, AWS DMS creates the CDC folder path following: MyTargetBucket/MyChangedData.

If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as MyTargetData, AWS DMS creates the CDC folder path following: MyTargetBucket/MyTargetData/MyChangedData.

For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including transaction order on the S3 target.

This setting is supported in AWS DMS versions 3.4.2 and later.

" + "documentation":"

Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates the data changes to the target endpoint. For an S3 target if you set PreserveTransactions to true, DMS verifies that you have set this parameter to a folder path on your S3 target where DMS can save the transaction order for the CDC load. DMS creates this CDC folder path in either your S3 target working directory or the S3 target location specified by BucketFolder and BucketName .

For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not specify BucketFolder, DMS creates the CDC folder path following: MyTargetBucket/MyChangedData.

If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as MyTargetData, DMS creates the CDC folder path following: MyTargetBucket/MyTargetData/MyChangedData.

For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

" } }, "documentation":"

Settings for exporting data to Amazon S3.

" @@ -5071,19 +5127,19 @@ }, "ServiceAccessRoleArn":{ "shape":"String", - "documentation":"

ARN of a service role needed to start the assessment run.

" + "documentation":"

ARN of the service role needed to start the assessment run. The role must allow the iam:PassRole action.

" }, "ResultLocationBucket":{ "shape":"String", - "documentation":"

Amazon S3 bucket where you want AWS DMS to store the results of this assessment run.

" + "documentation":"

Amazon S3 bucket where you want DMS to store the results of this assessment run.

" }, "ResultLocationFolder":{ "shape":"String", - "documentation":"

Folder within an Amazon S3 bucket where you want AWS DMS to store the results of this assessment run.

" + "documentation":"

Folder within an Amazon S3 bucket where you want DMS to store the results of this assessment run.

" }, "ResultEncryptionMode":{ "shape":"String", - "documentation":"

Encryption mode that you can specify to encrypt the results of this assessment run. If you don't specify this request parameter, AWS DMS stores the assessment run results without encryption. You can specify one of the options following:

  • \"SSE_S3\" – The server-side encryption provided as a default by Amazon S3.

  • \"SSE_KMS\" – AWS Key Management Service (AWS KMS) encryption. This encryption can use either a custom KMS encryption key that you specify or the default KMS encryption key that DMS provides.

" + "documentation":"

Encryption mode that you can specify to encrypt the results of this assessment run. If you don't specify this request parameter, DMS stores the assessment run results without encryption. You can specify one of the options following:

  • \"SSE_S3\" – The server-side encryption provided as a default by Amazon S3.

  • \"SSE_KMS\" – Key Management Service (KMS) encryption. This encryption can use either a custom KMS encryption key that you specify or the default KMS encryption key that DMS provides.

" }, "ResultKmsKeyArn":{ "shape":"String", @@ -5095,11 +5151,11 @@ }, "IncludeOnly":{ "shape":"IncludeTestList", - "documentation":"

Space-separated list of names for specific individual assessments that you want to include. These names come from the default list of individual assessments that AWS DMS supports for the associated migration task. This task is specified by ReplicationTaskArn.

You can't set a value for IncludeOnly if you also set a value for Exclude in the API operation.

To identify the names of the default individual assessments that AWS DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments operation using its own ReplicationTaskArn request parameter.

" + "documentation":"

Space-separated list of names for specific individual assessments that you want to include. These names come from the default list of individual assessments that DMS supports for the associated migration task. This task is specified by ReplicationTaskArn.

You can't set a value for IncludeOnly if you also set a value for Exclude in the API operation.

To identify the names of the default individual assessments that DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments operation using its own ReplicationTaskArn request parameter.

" }, "Exclude":{ "shape":"ExcludeTestList", - "documentation":"

Space-separated list of names for specific individual assessments that you want to exclude. These names come from the default list of individual assessments that AWS DMS supports for the associated migration task. This task is specified by ReplicationTaskArn.

You can't set a value for Exclude if you also set a value for IncludeOnly in the API operation.

To identify the names of the default individual assessments that AWS DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments operation using its own ReplicationTaskArn request parameter.

" + "documentation":"

Space-separated list of names for specific individual assessments that you want to exclude. These names come from the default list of individual assessments that DMS supports for the associated migration task. This task is specified by ReplicationTaskArn.

You can't set a value for Exclude if you also set a value for IncludeOnly in the API operation.

To identify the names of the default individual assessments that DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments operation using its own ReplicationTaskArn request parameter.

" } }, "documentation":"

" @@ -5135,7 +5191,7 @@ }, "CdcStartPosition":{ "shape":"String", - "documentation":"

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.

" + "documentation":"

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.

" }, "CdcStopPosition":{ "shape":"String", @@ -5241,7 +5297,7 @@ }, "SupportsCDC":{ "shape":"Boolean", - "documentation":"

Indicates if Change Data Capture (CDC) is supported.

" + "documentation":"

Indicates if change data capture (CDC) is supported.

" }, "EndpointType":{ "shape":"ReplicationEndpointTypeValue", @@ -5249,7 +5305,7 @@ }, "ReplicationInstanceEngineMinimumVersion":{ "shape":"String", - "documentation":"

The earliest AWS DMS engine version that supports this endpoint engine. Note that endpoint engines released with AWS DMS versions earlier than 3.1.1 do not return a value for this parameter.

" + "documentation":"

The earliest DMS engine version that supports this endpoint engine. Note that endpoint engines released with DMS versions earlier than 3.1.1 do not return a value for this parameter.

" }, "EngineDisplayName":{ "shape":"String", @@ -5287,7 +5343,7 @@ }, "SecretsManagerAccessRoleArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets Manager secret that allows access to the SAP ASE endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the SAP ASE endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

" }, "SecretsManagerSecretId":{ "shape":"String", @@ -5417,7 +5473,7 @@ "documentation":"

A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regular expressions: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" } }, - "documentation":"

A user-defined key-value pair that describes metadata added to an AWS DMS resource and that is used by operations such as the following:

  • AddTagsToResource

  • ListTagsForResource

  • RemoveTagsFromResource

" + "documentation":"

A user-defined key-value pair that describes metadata added to a DMS resource and that is used by operations such as the following:

  • AddTagsToResource

  • ListTagsForResource

  • RemoveTagsFromResource

" }, "TagList":{ "type":"list", @@ -5492,5 +5548,5 @@ "member":{"shape":"VpcSecurityGroupMembership"} } }, - "documentation":"AWS Database Migration Service

AWS Database Migration Service (AWS DMS) can migrate your data to and from the most widely used commercial and open-source databases such as Oracle, PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL.

For more information about AWS DMS, see What Is AWS Database Migration Service? in the AWS Database Migration User Guide.

" + "documentation":"Database Migration Service

Database Migration Service (DMS) can migrate your data to and from the most widely used commercial and open-source databases such as Oracle, PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL.

For more information about DMS, see What Is Database Migration Service? in the Database Migration Service User Guide.

" } diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index 6176b29136c4..cfd19ebf2132 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/databrew/src/main/resources/codegen-resources/service-2.json b/services/databrew/src/main/resources/codegen-resources/service-2.json index 173618476935..80436ba54710 100644 --- a/services/databrew/src/main/resources/codegen-resources/service-2.json +++ b/services/databrew/src/main/resources/codegen-resources/service-2.json @@ -669,6 +669,45 @@ "max":20, "min":0 }, + "ColumnSelector":{ + "type":"structure", + "members":{ + "Regex":{ + "shape":"ColumnName", + "documentation":"

A regular expression for selecting a column from a dataset.

" + }, + "Name":{ + "shape":"ColumnName", + "documentation":"

The name of a column from a dataset.

" + } + }, + "documentation":"

Selector of a column from a dataset for profile job configuration. One selector includes either a column name or a regular expression.

" + }, + "ColumnSelectorList":{ + "type":"list", + "member":{"shape":"ColumnSelector"}, + "min":1 + }, + "ColumnStatisticsConfiguration":{ + "type":"structure", + "required":["Statistics"], + "members":{ + "Selectors":{ + "shape":"ColumnSelectorList", + "documentation":"

List of column selectors. Selectors can be used to select columns from the dataset. When selectors are undefined, configuration will be applied to all supported columns.

" + }, + "Statistics":{ + "shape":"StatisticsConfiguration", + "documentation":"

Configuration for evaluations. Statistics can be used to select evaluations and override parameters of evaluations.

" + } + }, + "documentation":"

Configuration for column evaluations for a profile job. ColumnStatisticsConfiguration can be used to select evaluations and override parameters of evaluations for particular columns.

" + }, + "ColumnStatisticsConfigurationList":{ + "type":"list", + "member":{"shape":"ColumnStatisticsConfiguration"}, + "min":1 + }, "CompressionFormat":{ "type":"string", "enum":[ @@ -804,6 +843,10 @@ "documentation":"

The maximum number of times to retry the job after a job run fails.

" }, "OutputLocation":{"shape":"S3Location"}, + "Configuration":{ + "shape":"ProfileConfiguration", + "documentation":"

Configuration for profile jobs. Used to select columns, do evaluations, and override default parameters of evaluations. When configuration is null, the profile job will run with default settings.

" + }, "RoleArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be assumed when DataBrew runs the job.

" @@ -915,7 +958,11 @@ }, "DataCatalogOutputs":{ "shape":"DataCatalogOutputList", - "documentation":"

One or more artifacts that represent the AWS Glue Data Catalog output from running the job.

" + "documentation":"

One or more artifacts that represent the Glue Data Catalog output from running the job.

" + }, + "DatabaseOutputs":{ + "shape":"DatabaseOutputList", + "documentation":"

Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.

" }, "ProjectName":{ "shape":"ProjectName", @@ -1081,7 +1128,7 @@ "members":{ "CatalogId":{ "shape":"CatalogId", - "documentation":"

The unique identifier of the AWS account that holds the Data Catalog that stores the data.

" + "documentation":"

The unique identifier of the Amazon Web Services account that holds the Data Catalog that stores the data.

" }, "DatabaseName":{ "shape":"DatabaseName", @@ -1093,7 +1140,7 @@ }, "S3Options":{ "shape":"S3TableOutputOptions", - "documentation":"

Represents options that specify how and where DataBrew writes the S3 output generated by recipe jobs.

" + "documentation":"

Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs.

" }, "DatabaseOptions":{ "shape":"DatabaseTableOutputOptions", @@ -1104,7 +1151,7 @@ "documentation":"

A value that, if true, means that any data in the location specified for output is overwritten with new output. Not supported with DatabaseOptions.

" } }, - "documentation":"

Represents options that specify how and where DataBrew writes the output generated by recipe jobs.

" + "documentation":"

Represents options that specify how and where in the Glue Data Catalog DataBrew writes the output generated by recipe jobs.

" }, "DataCatalogOutputList":{ "type":"list", @@ -1135,6 +1182,37 @@ "max":255, "min":1 }, + "DatabaseOutput":{ + "type":"structure", + "required":[ + "GlueConnectionName", + "DatabaseOptions" + ], + "members":{ + "GlueConnectionName":{ + "shape":"GlueConnectionName", + "documentation":"

The Glue connection that stores the connection information for the target database.

" + }, + "DatabaseOptions":{ + "shape":"DatabaseTableOutputOptions", + "documentation":"

Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.

" + }, + "DatabaseOutputMode":{ + "shape":"DatabaseOutputMode", + "documentation":"

The output mode to write into the database. Currently supported option: NEW_TABLE.

" + } + }, + "documentation":"

Represents a JDBC database output object which defines the output destination for a DataBrew recipe job to write into.

" + }, + "DatabaseOutputList":{ + "type":"list", + "member":{"shape":"DatabaseOutput"}, + "min":1 + }, + "DatabaseOutputMode":{ + "type":"string", + "enum":["NEW_TABLE"] + }, "DatabaseTableName":{ "type":"string", "max":255, @@ -1545,12 +1623,20 @@ }, "DataCatalogOutputs":{ "shape":"DataCatalogOutputList", - "documentation":"

One or more artifacts that represent the AWS Glue Data Catalog output from running the job.

" + "documentation":"

One or more artifacts that represent the Glue Data Catalog output from running the job.

" + }, + "DatabaseOutputs":{ + "shape":"DatabaseOutputList", + "documentation":"

Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.

" }, "ProjectName":{ "shape":"ProjectName", "documentation":"

The DataBrew project associated with this job.

" }, + "ProfileConfiguration":{ + "shape":"ProfileConfiguration", + "documentation":"

Configuration for profile jobs. Used to select columns, do evaluations, and override default parameters of evaluations. When configuration is null, the profile job will run with default settings.

" + }, "RecipeReference":{"shape":"RecipeReference"}, "ResourceArn":{ "shape":"Arn", @@ -1623,6 +1709,10 @@ "shape":"JobName", "documentation":"

The name of the job being processed during this run.

" }, + "ProfileConfiguration":{ + "shape":"ProfileConfiguration", + "documentation":"

Configuration for profile jobs. Used to select columns, do evaluations, and override default parameters of evaluations. When configuration is null, the profile job will run with default settings.

" + }, "RunId":{ "shape":"JobRunId", "documentation":"

The unique identifier of the job run.

" @@ -1645,7 +1735,11 @@ }, "DataCatalogOutputs":{ "shape":"DataCatalogOutputList", - "documentation":"

One or more artifacts that represent the AWS Glue Data Catalog output from running the job.

" + "documentation":"

One or more artifacts that represent the Glue Data Catalog output from running the job.

" + }, + "DatabaseOutputs":{ + "shape":"DatabaseOutputList", + "documentation":"

Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.

" }, "RecipeReference":{"shape":"RecipeReference"}, "StartedBy":{ @@ -2067,7 +2161,11 @@ }, "DataCatalogOutputs":{ "shape":"DataCatalogOutputList", - "documentation":"

One or more artifacts that represent the AWS Glue Data Catalog output from running the job.

" + "documentation":"

One or more artifacts that represent the Glue Data Catalog output from running the job.

" + }, + "DatabaseOutputs":{ + "shape":"DatabaseOutputList", + "documentation":"

Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.

" }, "ProjectName":{ "shape":"ProjectName", @@ -2163,7 +2261,11 @@ }, "DataCatalogOutputs":{ "shape":"DataCatalogOutputList", - "documentation":"

One or more artifacts that represent the AWS Glue Data Catalog output from running the job.

" + "documentation":"

One or more artifacts that represent the Glue Data Catalog output from running the job.

" + }, + "DatabaseOutputs":{ + "shape":"DatabaseOutputList", + "documentation":"

Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.

" }, "RecipeReference":{ "shape":"RecipeReference", @@ -2606,7 +2708,7 @@ "documentation":"

Represents options that define how DataBrew formats job output files.

" } }, - "documentation":"

Represents options that specify how and where DataBrew writes the output generated by recipe jobs or profile jobs.

" + "documentation":"

Represents options that specify how and where in Amazon S3 DataBrew writes the output generated by recipe jobs or profile jobs.

" }, "OutputFormat":{ "type":"string", @@ -2657,7 +2759,7 @@ }, "ParameterValue":{ "type":"string", - "max":12288, + "max":32768, "min":1 }, "PathOptions":{ @@ -2692,6 +2794,24 @@ "min":1 }, "Preview":{"type":"boolean"}, + "ProfileConfiguration":{ + "type":"structure", + "members":{ + "DatasetStatisticsConfiguration":{ + "shape":"StatisticsConfiguration", + "documentation":"

Configuration for inter-column evaluations. Configuration can be used to select evaluations and override parameters of evaluations. When configuration is undefined, the profile job will run all supported inter-column evaluations.

" + }, + "ProfileColumns":{ + "shape":"ColumnSelectorList", + "documentation":"

List of column selectors. ProfileColumns can be used to select columns from the dataset. When ProfileColumns is undefined, the profile job will profile all supported columns.

" + }, + "ColumnStatisticsConfigurations":{ + "shape":"ColumnStatisticsConfigurationList", + "documentation":"

List of configurations for column evaluations. ColumnStatisticsConfigurations are used to select evaluations and override parameters of evaluations for particular columns. When ColumnStatisticsConfigurations is undefined, the profile job will profile all supported columns and run all supported evaluations.

" + } + }, + "documentation":"

Configuration for profile jobs. Configuration can be used to select columns, do evaluations, and override default parameters of evaluations. When configuration is undefined, the profile job will apply default settings to all supported columns.

" + }, "Project":{ "type":"structure", "required":[ @@ -2983,7 +3103,7 @@ "documentation":"

Represents an Amazon S3 location (bucket name and object key) where DataBrew can write output from a job.

" } }, - "documentation":"

Represents options that specify how and where DataBrew writes the S3 output generated by recipe jobs.

" + "documentation":"

Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs.

" }, "Sample":{ "type":"structure", @@ -3231,6 +3351,54 @@ } }, "StartedBy":{"type":"string"}, + "Statistic":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Z\\_]+$" + }, + "StatisticList":{ + "type":"list", + "member":{"shape":"Statistic"}, + "min":1 + }, + "StatisticOverride":{ + "type":"structure", + "required":[ + "Statistic", + "Parameters" + ], + "members":{ + "Statistic":{ + "shape":"Statistic", + "documentation":"

The name of an evaluation.

" + }, + "Parameters":{ + "shape":"ParameterMap", + "documentation":"

A map that includes overrides of an evaluation’s parameters.

" + } + }, + "documentation":"

Override of a particular evaluation for a profile job.

" + }, + "StatisticOverrideList":{ + "type":"list", + "member":{"shape":"StatisticOverride"}, + "min":1 + }, + "StatisticsConfiguration":{ + "type":"structure", + "members":{ + "IncludedStatistics":{ + "shape":"StatisticList", + "documentation":"

List of included evaluations. When the list is undefined, all supported evaluations will be included.

" + }, + "Overrides":{ + "shape":"StatisticOverrideList", + "documentation":"

List of overrides for evaluations.

" + } + }, + "documentation":"

Configuration of evaluations for a profile job. This configuration can be used to select evaluations and override the parameters of selected evaluations.

" + }, "StepIndex":{ "type":"integer", "min":0 @@ -3401,6 +3569,10 @@ "RoleArn" ], "members":{ + "Configuration":{ + "shape":"ProfileConfiguration", + "documentation":"

Configuration for profile jobs. Used to select columns, do evaluations, and override default parameters of evaluations. When configuration is null, the profile job will run with default settings.

" + }, "EncryptionKeyArn":{ "shape":"EncryptionKeyArn", "documentation":"

The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

" @@ -3525,7 +3697,11 @@ }, "DataCatalogOutputs":{ "shape":"DataCatalogOutputList", - "documentation":"

One or more artifacts that represent the AWS Glue Data Catalog output from running the job.

" + "documentation":"

One or more artifacts that represent the Glue Data Catalog output from running the job.

" + }, + "DatabaseOutputs":{ + "shape":"DatabaseOutputList", + "documentation":"

Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.

" }, "RoleArn":{ "shape":"Arn", diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index 07cba7b9ee3f..9cfb9e88aa17 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index 117a53e80dd3..ff4d7f5f7f65 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index 62de344b817a..066017d15a8f 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/dax/pom.xml b/services/dax/pom.xml index d0a990c0fe4b..e2be5456e939 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index adc71ab0ce87..85ca5643c12a 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 734a9866d2b8..5f963b424f23 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devopsguru/pom.xml 
b/services/devopsguru/pom.xml index 12ec2b2488d2..10c91d1a274b 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json b/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json index a0d974285995..67745d4d9f3b 100644 --- a/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json +++ b/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json @@ -4,11 +4,18 @@ "input_token": "NextToken", "output_token": "NextToken", "result_key": [ - "CloudFormation" + "CloudFormation", + "Service" ] }, "GetCostEstimation": { "input_token": "NextToken", + "non_aggregate_keys": [ + "Status", + "TotalCost", + "TimeRange", + "ResourceCollection" + ], "output_token": "NextToken", "result_key": [ "Costs" diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 37546b79fa94..d62e636f5753 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directconnect/src/main/resources/codegen-resources/service-2.json b/services/directconnect/src/main/resources/codegen-resources/service-2.json index ab20ed2aa1af..19d2e0228737 100644 --- a/services/directconnect/src/main/resources/codegen-resources/service-2.json +++ b/services/directconnect/src/main/resources/codegen-resources/service-2.json @@ -38,7 +38,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use AllocateHostedConnection instead.

Creates a hosted connection on an interconnect.

Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the specified interconnect.

Intended for use by AWS Direct Connect Partners only.

", + "documentation":"

Deprecated. Use AllocateHostedConnection instead.

Creates a hosted connection on an interconnect.

Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the specified interconnect.

Intended for use by Direct Connect Partners only.

", "deprecated":true }, "AllocateHostedConnection":{ @@ -55,7 +55,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a hosted connection on the specified interconnect or a link aggregation group (LAG) of interconnects.

Allocates a VLAN number and a specified amount of capacity (bandwidth) for use by a hosted connection on the specified interconnect or LAG of interconnects. AWS polices the hosted connection for the specified capacity and the AWS Direct Connect Partner must also police the hosted connection for the specified capacity.

Intended for use by AWS Direct Connect Partners only.

" + "documentation":"

Creates a hosted connection on the specified interconnect or a link aggregation group (LAG) of interconnects.

Allocates a VLAN number and a specified amount of capacity (bandwidth) for use by a hosted connection on the specified interconnect or LAG of interconnects. Amazon Web Services polices the hosted connection for the specified capacity and the Direct Connect Partner must also police the hosted connection for the specified capacity.

Intended for use by Direct Connect Partners only.

" }, "AllocatePrivateVirtualInterface":{ "name":"AllocatePrivateVirtualInterface", @@ -71,7 +71,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Provisions a private virtual interface to be owned by the specified AWS account.

Virtual interfaces created using this action must be confirmed by the owner using ConfirmPrivateVirtualInterface. Until then, the virtual interface is in the Confirming state and is not available to handle traffic.

" + "documentation":"

Provisions a private virtual interface to be owned by the specified account.

Virtual interfaces created using this action must be confirmed by the owner using ConfirmPrivateVirtualInterface. Until then, the virtual interface is in the Confirming state and is not available to handle traffic.

" }, "AllocatePublicVirtualInterface":{ "name":"AllocatePublicVirtualInterface", @@ -87,7 +87,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Provisions a public virtual interface to be owned by the specified AWS account.

The owner of a connection calls this function to provision a public virtual interface to be owned by the specified AWS account.

Virtual interfaces created using this function must be confirmed by the owner using ConfirmPublicVirtualInterface. Until this step has been completed, the virtual interface is in the confirming state and is not available to handle traffic.

When creating an IPv6 public virtual interface, omit the Amazon address and customer address. IPv6 addresses are automatically assigned from the Amazon pool of IPv6 addresses; you cannot specify custom IPv6 addresses.

" + "documentation":"

Provisions a public virtual interface to be owned by the specified account.

The owner of a connection calls this function to provision a public virtual interface to be owned by the specified account.

Virtual interfaces created using this function must be confirmed by the owner using ConfirmPublicVirtualInterface. Until this step has been completed, the virtual interface is in the confirming state and is not available to handle traffic.

When creating an IPv6 public virtual interface, omit the Amazon address and customer address. IPv6 addresses are automatically assigned from the Amazon pool of IPv6 addresses; you cannot specify custom IPv6 addresses.

" }, "AllocateTransitVirtualInterface":{ "name":"AllocateTransitVirtualInterface", @@ -103,7 +103,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Provisions a transit virtual interface to be owned by the specified AWS account. Use this type of interface to connect a transit gateway to your Direct Connect gateway.

The owner of a connection provisions a transit virtual interface to be owned by the specified AWS account.

After you create a transit virtual interface, it must be confirmed by the owner using ConfirmTransitVirtualInterface. Until this step has been completed, the transit virtual interface is in the requested state and is not available to handle traffic.

" + "documentation":"

Provisions a transit virtual interface to be owned by the specified account. Use this type of interface to connect a transit gateway to your Direct Connect gateway.

The owner of a connection provisions a transit virtual interface to be owned by the specified account.

After you create a transit virtual interface, it must be confirmed by the owner using ConfirmTransitVirtualInterface. Until this step has been completed, the transit virtual interface is in the requested state and is not available to handle traffic.

" }, "AssociateConnectionWithLag":{ "name":"AssociateConnectionWithLag", @@ -117,7 +117,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Associates an existing connection with a link aggregation group (LAG). The connection is interrupted and re-established as a member of the LAG (connectivity to AWS is interrupted). The connection must be hosted on the same AWS Direct Connect endpoint as the LAG, and its bandwidth must match the bandwidth for the LAG. You can re-associate a connection that's currently associated with a different LAG; however, if removing the connection would cause the original LAG to fall below its setting for minimum number of operational connections, the request fails.

Any virtual interfaces that are directly associated with the connection are automatically re-associated with the LAG. If the connection was originally associated with a different LAG, the virtual interfaces remain associated with the original LAG.

For interconnects, any hosted connections are automatically re-associated with the LAG. If the interconnect was originally associated with a different LAG, the hosted connections remain associated with the original LAG.

" + "documentation":"

Associates an existing connection with a link aggregation group (LAG). The connection is interrupted and re-established as a member of the LAG (connectivity to Amazon Web Services is interrupted). The connection must be hosted on the same Direct Connect endpoint as the LAG, and its bandwidth must match the bandwidth for the LAG. You can re-associate a connection that's currently associated with a different LAG; however, if removing the connection would cause the original LAG to fall below its setting for minimum number of operational connections, the request fails.

Any virtual interfaces that are directly associated with the connection are automatically re-associated with the LAG. If the connection was originally associated with a different LAG, the virtual interfaces remain associated with the original LAG.

For interconnects, any hosted connections are automatically re-associated with the LAG. If the interconnect was originally associated with a different LAG, the hosted connections remain associated with the original LAG.

" }, "AssociateHostedConnection":{ "name":"AssociateHostedConnection", @@ -131,7 +131,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Associates a hosted connection and its virtual interfaces with a link aggregation group (LAG) or interconnect. If the target interconnect or LAG has an existing hosted connection with a conflicting VLAN number or IP address, the operation fails. This action temporarily interrupts the hosted connection's connectivity to AWS as it is being migrated.

Intended for use by AWS Direct Connect Partners only.

" + "documentation":"

Associates a hosted connection and its virtual interfaces with a link aggregation group (LAG) or interconnect. If the target interconnect or LAG has an existing hosted connection with a conflicting VLAN number or IP address, the operation fails. This action temporarily interrupts the hosted connection's connectivity to Amazon Web Services as it is being migrated.

Intended for use by Direct Connect Partners only.

" }, "AssociateMacSecKey":{ "name":"AssociateMacSecKey", @@ -145,7 +145,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Associates a MAC Security (MACsec) Connection Key Name (CKN)/ Connectivity Association Key (CAK) pair with an AWS Direct Connect dedicated connection.

You must supply either the secretARN, or the CKN/CAK (ckn and cak) pair in the request.

For information about MAC Security (MACsec) key considerations, see MACsec pre-shared CKN/CAK key considerations in the AWS Direct Connect User Guide.

" + "documentation":"

Associates a MAC Security (MACsec) Connection Key Name (CKN)/ Connectivity Association Key (CAK) pair with a Direct Connect dedicated connection.

You must supply either the secretARN, or the CKN/CAK (ckn and cak) pair in the request.

For information about MAC Security (MACsec) key considerations, see MACsec pre-shared CKN/CAK key considerations in the Direct Connect User Guide.

" }, "AssociateVirtualInterface":{ "name":"AssociateVirtualInterface", @@ -159,7 +159,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Associates a virtual interface with a specified link aggregation group (LAG) or connection. Connectivity to AWS is temporarily interrupted as the virtual interface is being migrated. If the target connection or LAG has an associated virtual interface with a conflicting VLAN number or a conflicting IP address, the operation fails.

Virtual interfaces associated with a hosted connection cannot be associated with a LAG; hosted connections must be migrated along with their virtual interfaces using AssociateHostedConnection.

To reassociate a virtual interface to a new connection or LAG, the requester must own either the virtual interface itself or the connection to which the virtual interface is currently associated. Additionally, the requester must own the connection or LAG for the association.

" + "documentation":"

Associates a virtual interface with a specified link aggregation group (LAG) or connection. Connectivity to Amazon Web Services is temporarily interrupted as the virtual interface is being migrated. If the target connection or LAG has an associated virtual interface with a conflicting VLAN number or a conflicting IP address, the operation fails.

Virtual interfaces associated with a hosted connection cannot be associated with a LAG; hosted connections must be migrated along with their virtual interfaces using AssociateHostedConnection.

To reassociate a virtual interface to a new connection or LAG, the requester must own either the virtual interface itself or the connection to which the virtual interface is currently associated. Additionally, the requester must own the connection or LAG for the association.

" }, "ConfirmConnection":{ "name":"ConfirmConnection", @@ -187,7 +187,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Accepts ownership of a private virtual interface created by another AWS account.

After the virtual interface owner makes this call, the virtual interface is created and attached to the specified virtual private gateway or Direct Connect gateway, and is made available to handle traffic.

" + "documentation":"

Accepts ownership of a private virtual interface created by another account.

After the virtual interface owner makes this call, the virtual interface is created and attached to the specified virtual private gateway or Direct Connect gateway, and is made available to handle traffic.

" }, "ConfirmPublicVirtualInterface":{ "name":"ConfirmPublicVirtualInterface", @@ -201,7 +201,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Accepts ownership of a public virtual interface created by another AWS account.

After the virtual interface owner makes this call, the specified virtual interface is created and made available to handle traffic.

" + "documentation":"

Accepts ownership of a public virtual interface created by another account.

After the virtual interface owner makes this call, the specified virtual interface is created and made available to handle traffic.

" }, "ConfirmTransitVirtualInterface":{ "name":"ConfirmTransitVirtualInterface", @@ -215,7 +215,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Accepts ownership of a transit virtual interface created by another AWS account.

After the owner of the transit virtual interface makes this call, the specified transit virtual interface is created and made available to handle traffic.

" + "documentation":"

Accepts ownership of a transit virtual interface created by another account.

After the owner of the transit virtual interface makes this call, the specified transit virtual interface is created and made available to handle traffic.

" }, "CreateBGPPeer":{ "name":"CreateBGPPeer", @@ -229,7 +229,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a BGP peer on the specified virtual interface.

You must create a BGP peer for the corresponding address family (IPv4/IPv6) in order to access AWS resources that also use that address family.

If logical redundancy is not supported by the connection, interconnect, or LAG, the BGP peer cannot be in the same address family as an existing BGP peer on the virtual interface.

When creating a IPv6 BGP peer, omit the Amazon address and customer address. IPv6 addresses are automatically assigned from the Amazon pool of IPv6 addresses; you cannot specify custom IPv6 addresses.

For a public virtual interface, the Autonomous System Number (ASN) must be private or already on the allow list for the virtual interface.

" + "documentation":"

Creates a BGP peer on the specified virtual interface.

You must create a BGP peer for the corresponding address family (IPv4/IPv6) in order to access Amazon Web Services resources that also use that address family.

If logical redundancy is not supported by the connection, interconnect, or LAG, the BGP peer cannot be in the same address family as an existing BGP peer on the virtual interface.

When creating an IPv6 BGP peer, omit the Amazon address and customer address. IPv6 addresses are automatically assigned from the Amazon pool of IPv6 addresses; you cannot specify custom IPv6 addresses.

For a public virtual interface, the Autonomous System Number (ASN) must be private or already on the allow list for the virtual interface.

" }, "CreateConnection":{ "name":"CreateConnection", @@ -245,7 +245,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a connection between a customer network and a specific AWS Direct Connect location.

A connection links your internal network to an AWS Direct Connect location over a standard Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an AWS Direct Connect router.

To find the locations for your Region, use DescribeLocations.

You can automatically add the new connection to a link aggregation group (LAG) by specifying a LAG ID in the request. This ensures that the new connection is allocated on the same AWS Direct Connect endpoint that hosts the specified LAG. If there are no available ports on the endpoint, the request fails and no connection is created.

" + "documentation":"

Creates a connection between a customer network and a specific Direct Connect location.

A connection links your internal network to a Direct Connect location over a standard Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to a Direct Connect router.

To find the locations for your Region, use DescribeLocations.

You can automatically add the new connection to a link aggregation group (LAG) by specifying a LAG ID in the request. This ensures that the new connection is allocated on the same Direct Connect endpoint that hosts the specified LAG. If there are no available ports on the endpoint, the request fails and no connection is created.

" }, "CreateDirectConnectGateway":{ "name":"CreateDirectConnectGateway", @@ -259,7 +259,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a Direct Connect gateway, which is an intermediate object that enables you to connect a set of virtual interfaces and virtual private gateways. A Direct Connect gateway is global and visible in any AWS Region after it is created. The virtual interfaces and virtual private gateways that are connected through a Direct Connect gateway can be in different AWS Regions. This enables you to connect to a VPC in any Region, regardless of the Region in which the virtual interfaces are located, and pass traffic between them.

" + "documentation":"

Creates a Direct Connect gateway, which is an intermediate object that enables you to connect a set of virtual interfaces and virtual private gateways. A Direct Connect gateway is global and visible in any Region after it is created. The virtual interfaces and virtual private gateways that are connected through a Direct Connect gateway can be in different Regions. This enables you to connect to a VPC in any Region, regardless of the Region in which the virtual interfaces are located, and pass traffic between them.

" }, "CreateDirectConnectGatewayAssociation":{ "name":"CreateDirectConnectGatewayAssociation", @@ -287,7 +287,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a proposal to associate the specified virtual private gateway or transit gateway with the specified Direct Connect gateway.

You can associate a Direct Connect gateway and virtual private gateway or transit gateway that is owned by any AWS account.

" + "documentation":"

Creates a proposal to associate the specified virtual private gateway or transit gateway with the specified Direct Connect gateway.

You can associate a Direct Connect gateway and virtual private gateway or transit gateway that is owned by any account.

" }, "CreateInterconnect":{ "name":"CreateInterconnect", @@ -303,7 +303,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates an interconnect between an AWS Direct Connect Partner's network and a specific AWS Direct Connect location.

An interconnect is a connection that is capable of hosting other connections. The AWS Direct Connect partner can use an interconnect to provide AWS Direct Connect hosted connections to customers through their own network services. Like a standard connection, an interconnect links the partner's network to an AWS Direct Connect location over a standard Ethernet fiber-optic cable. One end is connected to the partner's router, the other to an AWS Direct Connect router.

You can automatically add the new interconnect to a link aggregation group (LAG) by specifying a LAG ID in the request. This ensures that the new interconnect is allocated on the same AWS Direct Connect endpoint that hosts the specified LAG. If there are no available ports on the endpoint, the request fails and no interconnect is created.

For each end customer, the AWS Direct Connect Partner provisions a connection on their interconnect by calling AllocateHostedConnection. The end customer can then connect to AWS resources by creating a virtual interface on their connection, using the VLAN assigned to them by the AWS Direct Connect Partner.

Intended for use by AWS Direct Connect Partners only.

" + "documentation":"

Creates an interconnect between a Direct Connect Partner's network and a specific Direct Connect location.

An interconnect is a connection that is capable of hosting other connections. The Direct Connect Partner can use an interconnect to provide Direct Connect hosted connections to customers through their own network services. Like a standard connection, an interconnect links the partner's network to a Direct Connect location over a standard Ethernet fiber-optic cable. One end is connected to the partner's router, the other to a Direct Connect router.

You can automatically add the new interconnect to a link aggregation group (LAG) by specifying a LAG ID in the request. This ensures that the new interconnect is allocated on the same Direct Connect endpoint that hosts the specified LAG. If there are no available ports on the endpoint, the request fails and no interconnect is created.

For each end customer, the Direct Connect Partner provisions a connection on their interconnect by calling AllocateHostedConnection. The end customer can then connect to Amazon Web Services resources by creating a virtual interface on their connection, using the VLAN assigned to them by the Direct Connect Partner.

Intended for use by Direct Connect Partners only.

" }, "CreateLag":{ "name":"CreateLag", @@ -319,7 +319,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a link aggregation group (LAG) with the specified number of bundled physical dedicated connections between the customer network and a specific AWS Direct Connect location. A LAG is a logical interface that uses the Link Aggregation Control Protocol (LACP) to aggregate multiple interfaces, enabling you to treat them as a single interface.

All connections in a LAG must use the same bandwidth (either 1Gbps or 10Gbps) and must terminate at the same AWS Direct Connect endpoint.

You can have up to 10 dedicated connections per LAG. Regardless of this limit, if you request more connections for the LAG than AWS Direct Connect can allocate on a single endpoint, no LAG is created.

You can specify an existing physical dedicated connection or interconnect to include in the LAG (which counts towards the total number of connections). Doing so interrupts the current physical dedicated connection, and re-establishes them as a member of the LAG. The LAG will be created on the same AWS Direct Connect endpoint to which the dedicated connection terminates. Any virtual interfaces associated with the dedicated connection are automatically disassociated and re-associated with the LAG. The connection ID does not change.

If the AWS account used to create a LAG is a registered AWS Direct Connect Partner, the LAG is automatically enabled to host sub-connections. For a LAG owned by a partner, any associated virtual interfaces cannot be directly configured.

" + "documentation":"

Creates a link aggregation group (LAG) with the specified number of bundled physical dedicated connections between the customer network and a specific Direct Connect location. A LAG is a logical interface that uses the Link Aggregation Control Protocol (LACP) to aggregate multiple interfaces, enabling you to treat them as a single interface.

All connections in a LAG must use the same bandwidth (either 1Gbps or 10Gbps) and must terminate at the same Direct Connect endpoint.

You can have up to 10 dedicated connections per LAG. Regardless of this limit, if you request more connections for the LAG than Direct Connect can allocate on a single endpoint, no LAG is created.

You can specify an existing physical dedicated connection or interconnect to include in the LAG (which counts towards the total number of connections). Doing so interrupts the current physical dedicated connection, and re-establishes them as a member of the LAG. The LAG will be created on the same Direct Connect endpoint to which the dedicated connection terminates. Any virtual interfaces associated with the dedicated connection are automatically disassociated and re-associated with the LAG. The connection ID does not change.

If the account used to create a LAG is a registered Direct Connect Partner, the LAG is automatically enabled to host sub-connections. For a LAG owned by a partner, any associated virtual interfaces cannot be directly configured.

" }, "CreatePrivateVirtualInterface":{ "name":"CreatePrivateVirtualInterface", @@ -335,7 +335,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a private virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A private virtual interface can be connected to either a Direct Connect gateway or a Virtual Private Gateway (VGW). Connecting the private virtual interface to a Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different AWS Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" + "documentation":"

Creates a private virtual interface. A virtual interface is the VLAN that transports Direct Connect traffic. A private virtual interface can be connected to either a Direct Connect gateway or a Virtual Private Gateway (VGW). Connecting the private virtual interface to a Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" }, "CreatePublicVirtualInterface":{ "name":"CreatePublicVirtualInterface", @@ -351,7 +351,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a public virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A public virtual interface supports sending traffic to public services of AWS such as Amazon S3.

When creating an IPv6 public virtual interface (addressFamily is ipv6), leave the customer and amazon address fields blank to use auto-assigned IPv6 space. Custom IPv6 addresses are not supported.

" + "documentation":"

Creates a public virtual interface. A virtual interface is the VLAN that transports Direct Connect traffic. A public virtual interface supports sending traffic to public services of Amazon Web Services such as Amazon S3.

When creating an IPv6 public virtual interface (addressFamily is ipv6), leave the customer and amazon address fields blank to use auto-assigned IPv6 space. Custom IPv6 addresses are not supported.

" }, "CreateTransitVirtualInterface":{ "name":"CreateTransitVirtualInterface", @@ -395,7 +395,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deletes the specified connection.

Deleting a connection only stops the AWS Direct Connect port hour and data transfer charges. If you are partnering with any third parties to connect with the AWS Direct Connect location, you must cancel your service with them separately.

" + "documentation":"

Deletes the specified connection.

Deleting a connection only stops the Direct Connect port hour and data transfer charges. If you are partnering with any third parties to connect with the Direct Connect location, you must cancel your service with them separately.

" }, "DeleteDirectConnectGateway":{ "name":"DeleteDirectConnectGateway", @@ -451,7 +451,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deletes the specified interconnect.

Intended for use by AWS Direct Connect Partners only.

" + "documentation":"

Deletes the specified interconnect.

Intended for use by Direct Connect Partners only.

" }, "DeleteLag":{ "name":"DeleteLag", @@ -493,7 +493,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for a connection.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

", + "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for a connection.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations in the Direct Connect User Guide.

", "deprecated":true }, "DescribeConnections":{ @@ -522,7 +522,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use DescribeHostedConnections instead.

Lists the connections that have been provisioned on the specified interconnect.

Intended for use by AWS Direct Connect Partners only.

", + "documentation":"

Deprecated. Use DescribeHostedConnections instead.

Lists the connections that have been provisioned on the specified interconnect.

Intended for use by Direct Connect Partners only.

", "deprecated":true }, "DescribeDirectConnectGatewayAssociationProposals":{ @@ -593,7 +593,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Lists the hosted connections that have been provisioned on the specified interconnect or link aggregation group (LAG).

Intended for use by AWS Direct Connect Partners only.

" + "documentation":"

Lists the hosted connections that have been provisioned on the specified interconnect or link aggregation group (LAG).

Intended for use by Direct Connect Partners only.

" }, "DescribeInterconnectLoa":{ "name":"DescribeInterconnectLoa", @@ -607,7 +607,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for the specified interconnect.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

", + "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for the specified interconnect.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations in the Direct Connect User Guide.

", "deprecated":true }, "DescribeInterconnects":{ @@ -622,7 +622,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Lists the interconnects owned by the AWS account or only the specified interconnect.

" + "documentation":"

Lists the interconnects owned by the account or only the specified interconnect.

" }, "DescribeLags":{ "name":"DescribeLags", @@ -650,7 +650,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Gets the LOA-CFA for a connection, interconnect, or link aggregation group (LAG).

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

" + "documentation":"

Gets the LOA-CFA for a connection, interconnect, or link aggregation group (LAG).

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations in the Direct Connect User Guide.

" }, "DescribeLocations":{ "name":"DescribeLocations", @@ -663,7 +663,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Lists the AWS Direct Connect locations in the current AWS Region. These are the locations that can be selected when calling CreateConnection or CreateInterconnect.

" + "documentation":"

Lists the Direct Connect locations in the current Region. These are the locations that can be selected when calling CreateConnection or CreateInterconnect.

" }, "DescribeTags":{ "name":"DescribeTags", @@ -677,7 +677,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Describes the tags associated with the specified AWS Direct Connect resources.

" + "documentation":"

Describes the tags associated with the specified Direct Connect resources.

" }, "DescribeVirtualGateways":{ "name":"DescribeVirtualGateways", @@ -690,7 +690,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Lists the virtual private gateways owned by the AWS account.

You can create one or more AWS Direct Connect private virtual interfaces linked to a virtual private gateway.

" + "documentation":"

Lists the virtual private gateways owned by the account.

You can create one or more Direct Connect private virtual interfaces linked to a virtual private gateway.

" }, "DescribeVirtualInterfaces":{ "name":"DescribeVirtualInterfaces", @@ -704,7 +704,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Displays all virtual interfaces for an AWS account. Virtual interfaces deleted fewer than 15 minutes before you make the request are also returned. If you specify a connection ID, only the virtual interfaces associated with the connection are returned. If you specify a virtual interface ID, then only a single virtual interface is returned.

A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect location and the customer network.

" + "documentation":"

Displays all virtual interfaces for an account. Virtual interfaces deleted fewer than 15 minutes before you make the request are also returned. If you specify a connection ID, only the virtual interfaces associated with the connection are returned. If you specify a virtual interface ID, then only a single virtual interface is returned.

A virtual interface (VLAN) transmits the traffic between the Direct Connect location and the customer network.

" }, "DisassociateConnectionFromLag":{ "name":"DisassociateConnectionFromLag", @@ -718,7 +718,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Disassociates a connection from a link aggregation group (LAG). The connection is interrupted and re-established as a standalone connection (the connection is not deleted; to delete the connection, use the DeleteConnection request). If the LAG has associated virtual interfaces or hosted connections, they remain associated with the LAG. A disassociated connection owned by an AWS Direct Connect Partner is automatically converted to an interconnect.

If disassociating the connection would cause the LAG to fall below its setting for minimum number of operational connections, the request fails, except when it's the last member of the LAG. If all connections are disassociated, the LAG continues to exist as an empty LAG with no physical connections.

" + "documentation":"

Disassociates a connection from a link aggregation group (LAG). The connection is interrupted and re-established as a standalone connection (the connection is not deleted; to delete the connection, use the DeleteConnection request). If the LAG has associated virtual interfaces or hosted connections, they remain associated with the LAG. A disassociated connection owned by a Direct Connect Partner is automatically converted to an interconnect.

If disassociating the connection would cause the LAG to fall below its setting for minimum number of operational connections, the request fails, except when it's the last member of the LAG. If all connections are disassociated, the LAG continues to exist as an empty LAG with no physical connections.

" }, "DisassociateMacSecKey":{ "name":"DisassociateMacSecKey", @@ -732,7 +732,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Removes the association between a MAC Security (MACsec) security key and an AWS Direct Connect dedicated connection.

" + "documentation":"

Removes the association between a MAC Security (MACsec) security key and a Direct Connect dedicated connection.

" }, "ListVirtualInterfaceTestHistory":{ "name":"ListVirtualInterfaceTestHistory", @@ -790,7 +790,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Adds the specified tags to the specified AWS Direct Connect resource. Each resource can have a maximum of 50 tags.

Each tag consists of a key and an optional value. If a tag with the same key is already associated with the resource, this action updates its value.

" + "documentation":"

Adds the specified tags to the specified Direct Connect resource. Each resource can have a maximum of 50 tags.

Each tag consists of a key and an optional value. If a tag with the same key is already associated with the resource, this action updates its value.

" }, "UntagResource":{ "name":"UntagResource", @@ -804,7 +804,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Removes one or more tags from the specified AWS Direct Connect resource.

" + "documentation":"

Removes one or more tags from the specified Direct Connect resource.

" }, "UpdateConnection":{ "name":"UpdateConnection", @@ -818,7 +818,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Updates the AWS Direct Connect dedicated connection configuration.

You can update the following parameters for a connection:

  • The connection name

  • The connection's MAC Security (MACsec) encryption mode.

" + "documentation":"

Updates the Direct Connect dedicated connection configuration.

You can update the following parameters for a connection:

  • The connection name

  • The connection's MAC Security (MACsec) encryption mode.

" }, "UpdateDirectConnectGatewayAssociation":{ "name":"UpdateDirectConnectGatewayAssociation", @@ -846,7 +846,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Updates the attributes of the specified link aggregation group (LAG).

You can update the following LAG attributes:

  • The name of the LAG.

  • The value for the minimum number of connections that must be operational for the LAG itself to be operational.

  • The LAG's MACsec encryption mode.

    AWS assigns this value to each connection which is part of the LAG.

  • The tags

If you adjust the threshold value for the minimum number of operational connections, ensure that the new value does not cause the LAG to fall below the threshold and become non-operational.

" + "documentation":"

Updates the attributes of the specified link aggregation group (LAG).

You can update the following LAG attributes:

  • The name of the LAG.

  • The value for the minimum number of connections that must be operational for the LAG itself to be operational.

  • The LAG's MACsec encryption mode.

    Amazon Web Services assigns this value to each connection which is part of the LAG.

  • The tags

If you adjust the threshold value for the minimum number of operational connections, ensure that the new value does not cause the LAG to fall below the threshold and become non-operational.

" }, "UpdateVirtualInterfaceAttributes":{ "name":"UpdateVirtualInterfaceAttributes", @@ -883,11 +883,11 @@ }, "associatedGatewayOwnerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the virtual private gateway or transit gateway.

" + "documentation":"

The ID of the account that owns the virtual private gateway or transit gateway.

" }, "overrideAllowedPrefixesToDirectConnectGateway":{ "shape":"RouteFilterPrefixList", - "documentation":"

Overrides the Amazon VPC prefixes advertised to the Direct Connect gateway.

For information about how to set the prefixes, see Allowed Prefixes in the AWS Direct Connect User Guide.

" + "documentation":"

Overrides the Amazon VPC prefixes advertised to the Direct Connect gateway.

For information about how to set the prefixes, see Allowed Prefixes in the Direct Connect User Guide.

" } } }, @@ -916,7 +916,7 @@ "members":{ "bandwidth":{ "shape":"Bandwidth", - "documentation":"

The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those AWS Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, 2Gbps, 5Gbps or 10Gbps hosted connection.

" + "documentation":"

The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, 2Gbps, 5Gbps or 10Gbps hosted connection.

" }, "connectionName":{ "shape":"ConnectionName", @@ -924,7 +924,7 @@ }, "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account of the customer for whom the connection will be provisioned.

" + "documentation":"

The ID of the account of the customer for whom the connection will be provisioned.

" }, "interconnectId":{ "shape":"InterconnectId", @@ -952,11 +952,11 @@ }, "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account ID of the customer for the connection.

" + "documentation":"

The ID of the account of the customer for the connection.

" }, "bandwidth":{ "shape":"Bandwidth", - "documentation":"

The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those AWS Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, 2Gbps, 5Gbps or 10Gbps hosted connection.

" + "documentation":"

The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, 2Gbps, 5Gbps or 10Gbps hosted connection.

" }, "connectionName":{ "shape":"ConnectionName", @@ -986,7 +986,7 @@ }, "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the virtual private interface.

" + "documentation":"

The ID of the account that owns the virtual private interface.

" }, "newPrivateVirtualInterfaceAllocation":{ "shape":"NewPrivateVirtualInterfaceAllocation", @@ -1008,7 +1008,7 @@ }, "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the public virtual interface.

" + "documentation":"

The ID of the account that owns the public virtual interface.

" }, "newPublicVirtualInterfaceAllocation":{ "shape":"NewPublicVirtualInterfaceAllocation", @@ -1030,7 +1030,7 @@ }, "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the transit virtual interface.

" + "documentation":"

The ID of the account that owns the transit virtual interface.

" }, "newTransitVirtualInterfaceAllocation":{ "shape":"NewTransitVirtualInterfaceAllocation", @@ -1144,7 +1144,7 @@ }, "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the associated virtual private gateway or transit gateway.

" + "documentation":"

The ID of the account that owns the associated virtual private gateway or transit gateway.

" }, "region":{ "shape":"Region", @@ -1167,6 +1167,7 @@ "deprecated":true }, "AwsDeviceV2":{"type":"string"}, + "AwsLogicalDeviceId":{"type":"string"}, "BGPAuthKey":{"type":"string"}, "BGPPeer":{ "type":"structure", @@ -1205,7 +1206,11 @@ }, "awsDeviceV2":{ "shape":"AwsDeviceV2", - "documentation":"

The Direct Connect endpoint on which the BGP peer terminates.

" + "documentation":"

The Direct Connect endpoint that terminates the BGP peer.

" + }, + "awsLogicalDeviceId":{ + "shape":"AwsLogicalDeviceId", + "documentation":"

The Direct Connect endpoint that terminates the logical connection. This device might be different than the device that terminates the physical connection.

" } }, "documentation":"

Information about a BGP peer.

" @@ -1338,7 +1343,7 @@ "members":{ "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the connection.

" + "documentation":"

The ID of the account that owns the connection.

" }, "connectionId":{ "shape":"ConnectionId", @@ -1354,7 +1359,7 @@ }, "region":{ "shape":"Region", - "documentation":"

The AWS Region where the connection is located.

" + "documentation":"

The Region where the connection is located.

" }, "location":{ "shape":"LocationCode", @@ -1370,7 +1375,7 @@ }, "partnerName":{ "shape":"PartnerName", - "documentation":"

The name of the AWS Direct Connect service provider associated with the connection.

" + "documentation":"

The name of the Direct Connect service provider associated with the connection.

" }, "loaIssueTime":{ "shape":"LoaIssueTime", @@ -1390,7 +1395,11 @@ }, "awsDeviceV2":{ "shape":"AwsDeviceV2", - "documentation":"

The Direct Connect endpoint on which the physical connection terminates.

" + "documentation":"

The Direct Connect endpoint that terminates the physical connection.

" + }, + "awsLogicalDeviceId":{ + "shape":"AwsLogicalDeviceId", + "documentation":"

The Direct Connect endpoint that terminates the logical connection. This device might be different than the device that terminates the physical connection.

" }, "hasLogicalRedundancy":{ "shape":"HasLogicalRedundancy", @@ -1421,7 +1430,7 @@ "documentation":"

The MAC Security (MACsec) security keys associated with the connection.

" } }, - "documentation":"

Information about an AWS Direct Connect connection.

" + "documentation":"

Information about a Direct Connect connection.

" }, "ConnectionId":{"type":"string"}, "ConnectionList":{ @@ -1509,7 +1518,7 @@ }, "requestMACSec":{ "shape":"RequestMACSec", - "documentation":"

Indicates whether you want the connection to support MAC Security (MACsec).

MAC Security (MACsec) is only available on dedicated connections. For information about MAC Security (MACsec) prerequisties, see MACsec prerequisties in the AWS Direct Connect User Guide.

" + "documentation":"

Indicates whether you want the connection to support MAC Security (MACsec).

MAC Security (MACsec) is only available on dedicated connections. For information about MAC Security (MACsec) prerequisites, see MACsec prerequisites in the Direct Connect User Guide.

" } } }, @@ -1527,7 +1536,7 @@ }, "directConnectGatewayOwnerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the Direct Connect gateway.

" + "documentation":"

The ID of the account that owns the Direct Connect gateway.

" }, "gatewayId":{ "shape":"GatewayIdToAssociate", @@ -1566,7 +1575,7 @@ }, "addAllowedPrefixesToDirectConnectGateway":{ "shape":"RouteFilterPrefixList", - "documentation":"

The Amazon VPC prefixes to advertise to the Direct Connect gateway

This parameter is required when you create an association to a transit gateway.

For information about how to set the prefixes, see Allowed Prefixes in the AWS Direct Connect User Guide.

" + "documentation":"

The Amazon VPC prefixes to advertise to the Direct Connect gateway

This parameter is required when you create an association to a transit gateway.

For information about how to set the prefixes, see Allowed Prefixes in the Direct Connect User Guide.

" }, "virtualGatewayId":{ "shape":"VirtualGatewayId", @@ -1683,7 +1692,7 @@ }, "requestMACSec":{ "shape":"RequestMACSec", - "documentation":"

Indicates whether the connection will support MAC Security (MACsec).

All connections in the LAG must be capable of supporting MAC Security (MACsec). For information about MAC Security (MACsec) prerequisties, see MACsec prerequisties in the AWS Direct Connect User Guide.

" + "documentation":"

Indicates whether the connection will support MAC Security (MACsec).

All connections in the LAG must be capable of supporting MAC Security (MACsec). For information about MAC Security (MACsec) prerequisites, see MACsec prerequisites in the Direct Connect User Guide.

" } } }, @@ -2217,7 +2226,7 @@ }, "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the Direct Connect gateway.

" + "documentation":"

The ID of the account that owns the Direct Connect gateway.

" }, "directConnectGatewayState":{ "shape":"DirectConnectGatewayState", @@ -2239,7 +2248,7 @@ }, "directConnectGatewayOwnerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the associated gateway.

" + "documentation":"

The ID of the account that owns the associated gateway.

" }, "associationState":{ "shape":"DirectConnectGatewayAssociationState", @@ -2267,11 +2276,11 @@ }, "virtualGatewayRegion":{ "shape":"VirtualGatewayRegion", - "documentation":"

The AWS Region where the virtual private gateway is located.

" + "documentation":"

The Region where the virtual private gateway is located.

" }, "virtualGatewayOwnerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the virtual private gateway.

" + "documentation":"

The ID of the account that owns the virtual private gateway.

" } }, "documentation":"

Information about an association between a Direct Connect gateway and a virtual private gateway or transit gateway.

" @@ -2294,7 +2303,7 @@ }, "directConnectGatewayOwnerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the Direct Connect gateway.

" + "documentation":"

The ID of the account that owns the Direct Connect gateway.

" }, "proposalState":{ "shape":"DirectConnectGatewayAssociationProposalState", @@ -2351,11 +2360,11 @@ }, "virtualInterfaceRegion":{ "shape":"VirtualInterfaceRegion", - "documentation":"

The AWS Region where the virtual interface is located.

" + "documentation":"

The Region where the virtual interface is located.

" }, "virtualInterfaceOwnerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the virtual interface.

" + "documentation":"

The ID of the account that owns the virtual interface.

" }, "attachmentState":{ "shape":"DirectConnectGatewayAttachmentState", @@ -2507,7 +2516,7 @@ }, "region":{ "shape":"Region", - "documentation":"

The AWS Region where the connection is located.

" + "documentation":"

The Region where the connection is located.

" }, "location":{ "shape":"LocationCode", @@ -2535,7 +2544,11 @@ }, "awsDeviceV2":{ "shape":"AwsDeviceV2", - "documentation":"

The Direct Connect endpoint on which the physical connection terminates.

" + "documentation":"

The Direct Connect endpoint that terminates the physical connection.

" + }, + "awsLogicalDeviceId":{ + "shape":"AwsLogicalDeviceId", + "documentation":"

The Direct Connect endpoint that terminates the logical connection. This device might be different than the device that terminates the physical connection.

" }, "hasLogicalRedundancy":{ "shape":"HasLogicalRedundancy", @@ -2597,7 +2610,7 @@ }, "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the LAG.

" + "documentation":"

The ID of the account that owns the LAG.

" }, "lagName":{ "shape":"LagName", @@ -2613,7 +2626,7 @@ }, "region":{ "shape":"Region", - "documentation":"

The AWS Region where the connection is located.

" + "documentation":"

The Region where the connection is located.

" }, "minimumLinks":{ "shape":"Count", @@ -2621,11 +2634,15 @@ }, "awsDevice":{ "shape":"AwsDevice", - "documentation":"

The AWS Direct Connect endpoint that hosts the LAG.

" + "documentation":"

The Direct Connect endpoint that hosts the LAG.

" }, "awsDeviceV2":{ "shape":"AwsDeviceV2", - "documentation":"

The AWS Direct Connect endpoint that hosts the LAG.

" + "documentation":"

The Direct Connect endpoint that hosts the LAG.

" + }, + "awsLogicalDeviceId":{ + "shape":"AwsLogicalDeviceId", + "documentation":"

The Direct Connect endpoint that terminates the logical connection. This device might be different than the device that terminates the physical connection.

" }, "connections":{ "shape":"ConnectionList", @@ -2768,7 +2785,7 @@ }, "region":{ "shape":"Region", - "documentation":"

The AWS Region for the location.

" + "documentation":"

The Region for the location.

" }, "availablePortSpeeds":{ "shape":"AvailablePortSpeeds", @@ -2783,7 +2800,7 @@ "documentation":"

The available MAC Security (MACsec) port speeds for the location.

" } }, - "documentation":"

Information about an AWS Direct Connect location.

" + "documentation":"

Information about a Direct Connect location.

" }, "LocationCode":{"type":"string"}, "LocationList":{ @@ -2999,7 +3016,7 @@ }, "routeFilterPrefixes":{ "shape":"RouteFilterPrefixList", - "documentation":"

The routes to be advertised to the AWS network in this Region. Applies to public virtual interfaces.

" + "documentation":"

The routes to be advertised to the Amazon Web Services network in this Region. Applies to public virtual interfaces.

" }, "tags":{ "shape":"TagList", @@ -3046,7 +3063,7 @@ }, "routeFilterPrefixes":{ "shape":"RouteFilterPrefixList", - "documentation":"

The routes to be advertised to the AWS network in this Region. Applies to public virtual interfaces.

" + "documentation":"

The routes to be advertised to the Amazon Web Services network in this Region. Applies to public virtual interfaces.

" }, "tags":{ "shape":"TagList", @@ -3172,7 +3189,7 @@ "documentation":"

The tags.

" } }, - "documentation":"

Information about a tag associated with an AWS Direct Connect resource.

" + "documentation":"

Information about a tag associated with a Direct Connect resource.

" }, "ResourceTagList":{ "type":"list", @@ -3395,7 +3412,7 @@ }, "encryptionMode":{ "shape":"EncryptionMode", - "documentation":"

The LAG MAC Security (MACsec) encryption mode.

AWS applies the value to all connections which are part of the LAG.

" + "documentation":"

The LAG MAC Security (MACsec) encryption mode.

Amazon Web Services applies the value to all connections which are part of the LAG.

" } } }, @@ -3452,7 +3469,7 @@ "members":{ "ownerAccount":{ "shape":"OwnerAccount", - "documentation":"

The ID of the AWS account that owns the virtual interface.

" + "documentation":"

The ID of the account that owns the virtual interface.

" }, "virtualInterfaceId":{ "shape":"VirtualInterfaceId", @@ -3528,7 +3545,7 @@ }, "routeFilterPrefixes":{ "shape":"RouteFilterPrefixList", - "documentation":"

The routes to be advertised to the AWS network in this Region. Applies to public virtual interfaces.

" + "documentation":"

The routes to be advertised to the Amazon Web Services network in this Region. Applies to public virtual interfaces.

" }, "bgpPeers":{ "shape":"BGPPeerList", @@ -3536,11 +3553,15 @@ }, "region":{ "shape":"Region", - "documentation":"

The AWS Region where the virtual interface is located.

" + "documentation":"

The Region where the virtual interface is located.

" }, "awsDeviceV2":{ "shape":"AwsDeviceV2", - "documentation":"

The Direct Connect endpoint on which the virtual interface terminates.

" + "documentation":"

The Direct Connect endpoint that terminates the physical connection.

" + }, + "awsLogicalDeviceId":{ + "shape":"AwsLogicalDeviceId", + "documentation":"

The Direct Connect endpoint that terminates the logical connection. This device might be different than the device that terminates the physical connection.

" }, "tags":{ "shape":"TagList", @@ -3623,5 +3644,5 @@ } } }, - "documentation":"

AWS Direct Connect links your internal network to an AWS Direct Connect location over a standard Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an AWS Direct Connect router. With this connection in place, you can create virtual interfaces directly to the AWS cloud (for example, to Amazon EC2 and Amazon S3) and to Amazon VPC, bypassing Internet service providers in your network path. A connection provides access to all AWS Regions except the China (Beijing) and (China) Ningxia Regions. AWS resources in the China Regions can only be accessed through locations associated with those Regions.

" + "documentation":"

Direct Connect links your internal network to a Direct Connect location over a standard Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to a Direct Connect router. With this connection in place, you can create virtual interfaces directly to the Amazon Web Services Cloud (for example, to Amazon EC2 and Amazon S3) and to Amazon VPC, bypassing Internet service providers in your network path. A connection provides access to all Regions except the China (Beijing) and (China) Ningxia Regions. Amazon Web Services resources in the China Regions can only be accessed through locations associated with those Regions.

" } diff --git a/services/directory/pom.xml b/services/directory/pom.xml index e42308268226..dab853db568c 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index 2fe44ce6d97c..c10fac238e24 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index 994fcb263567..524c5f094b4c 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index a27b8d261ced..2a8b0faafcd1 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java index b470e1b8a700..eb46ebf2261e 100644 --- a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java +++ b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java @@ -73,6 +73,7 @@ public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { RetryMode retryMode = RetryMode.resolver() .profileFile(() -> config.option(SdkClientOption.PROFILE_FILE)) .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) 
.resolve(); switch (retryMode) { diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index 055e421c91c1..acc38966ef93 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index acdfde40b093..55e5cb59eec3 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/paginators-1.json b/services/ec2/src/main/resources/codegen-resources/paginators-1.json index e1d041efc2c5..9da7b629d7bf 100755 --- a/services/ec2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/ec2/src/main/resources/codegen-resources/paginators-1.json @@ -174,6 +174,12 @@ "output_token": "NextToken", "result_key": "InstanceCreditSpecifications" }, + "DescribeInstanceEventWindows": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "InstanceEventWindows" + }, "DescribeInstanceStatus": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 31b978ea0e11..32434141a5dd 100755 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -90,7 +90,7 @@ }, "input":{"shape":"AllocateAddressRequest"}, "output":{"shape":"AllocateAddressResult"}, - "documentation":"

Allocates an Elastic IP address to your account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different account.

You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

[EC2-VPC] If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another account. You cannot recover an Elastic IP address for EC2-Classic. To attempt to recover an Elastic IP address that you released, specify it in this operation.

An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per Region and 5 Elastic IP addresses for EC2-VPC per Region.

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

" + "documentation":"

Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account.

You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

[EC2-VPC] If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. You cannot recover an Elastic IP address for EC2-Classic. To attempt to recover an Elastic IP address that you released, specify it in this operation.

An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per Region and 5 Elastic IP addresses for EC2-VPC per Region.

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

" }, "AllocateHosts":{ "name":"AllocateHosts", @@ -120,7 +120,7 @@ }, "input":{"shape":"AssignIpv6AddressesRequest"}, "output":{"shape":"AssignIpv6AddressesResult"}, - "documentation":"

Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type. For information, see IP Addresses Per Network Interface Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

You must specify either the IPv6 addresses or the IPv6 address count in the request.

" + "documentation":"

Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type. For information, see IP Addresses Per Network Interface Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

You must specify either the IPv6 addresses or the IPv6 address count in the request.

You can optionally use Prefix Delegation on the network interface. You must specify either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation count. For information, see Prefix Delegation in the Amazon Elastic Compute Cloud User Guide.

" }, "AssignPrivateIpAddresses":{ "name":"AssignPrivateIpAddresses", @@ -130,7 +130,7 @@ }, "input":{"shape":"AssignPrivateIpAddressesRequest"}, "output":{"shape":"AssignPrivateIpAddressesResult"}, - "documentation":"

Assigns one or more secondary private IP addresses to the specified network interface.

You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

When you move a secondary private IP address to another network interface, any Elastic IP address that is associated with the IP address is also moved.

Remapping an IP address is an asynchronous operation. When you move an IP address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the remapping is complete.

You must specify either the IP addresses or the IP address count in the request.

" + "documentation":"

Assigns one or more secondary private IP addresses to the specified network interface.

You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

When you move a secondary private IP address to another network interface, any Elastic IP address that is associated with the IP address is also moved.

Remapping an IP address is an asynchronous operation. When you move an IP address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the remapping is complete.

You must specify either the IP addresses or the IP address count in the request.

You can optionally use Prefix Delegation on the network interface. You must specify either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation count. For information, see Prefix Delegation in the Amazon Elastic Compute Cloud User Guide.

" }, "AssociateAddress":{ "name":"AssociateAddress", @@ -159,7 +159,7 @@ "requestUri":"/" }, "input":{"shape":"AssociateDhcpOptionsRequest"}, - "documentation":"

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP options sets in the Amazon Virtual Private Cloud User Guide.

" }, "AssociateEnclaveCertificateIamRole":{ "name":"AssociateEnclaveCertificateIamRole", @@ -181,6 +181,16 @@ "output":{"shape":"AssociateIamInstanceProfileResult"}, "documentation":"

Associates an IAM instance profile with a running or stopped instance. You cannot associate more than one IAM instance profile with an instance.

" }, + "AssociateInstanceEventWindow":{ + "name":"AssociateInstanceEventWindow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateInstanceEventWindowRequest"}, + "output":{"shape":"AssociateInstanceEventWindowResult"}, + "documentation":"

Associates one or more targets with an event window. Only one type of target (instance IDs, Dedicated Host IDs, or tags) can be specified with an event window.

For more information, see Define event windows for scheduled events in the Amazon EC2 User Guide.

" + }, "AssociateRouteTable":{ "name":"AssociateRouteTable", "http":{ @@ -189,7 +199,7 @@ }, "input":{"shape":"AssociateRouteTableRequest"}, "output":{"shape":"AssociateRouteTableResult"}, - "documentation":"

Associates a subnet in your VPC or an internet gateway or virtual private gateway attached to your VPC with a route table in your VPC. This association causes traffic from the subnet or gateway to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table later. A route table can be associated with multiple subnets.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Associates a subnet in your VPC or an internet gateway or virtual private gateway attached to your VPC with a route table in your VPC. This association causes traffic from the subnet or gateway to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table later. A route table can be associated with multiple subnets.

For more information, see Route tables in the Amazon Virtual Private Cloud User Guide.

" }, "AssociateSubnetCidrBlock":{ "name":"AssociateSubnetCidrBlock", @@ -239,7 +249,7 @@ }, "input":{"shape":"AssociateVpcCidrBlockRequest"}, "output":{"shape":"AssociateVpcCidrBlockResult"}, - "documentation":"

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP). The IPv6 CIDR block size is fixed at /56.

You must specify one of the following in the request: an IPv4 CIDR block, an IPv6 pool, or an Amazon-provided IPv6 CIDR block.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP). The IPv6 CIDR block size is fixed at /56.

You must specify one of the following in the request: an IPv4 CIDR block, an IPv6 pool, or an Amazon-provided IPv6 CIDR block.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and subnet sizing in the Amazon Virtual Private Cloud User Guide.

" }, "AttachClassicLinkVpc":{ "name":"AttachClassicLinkVpc", @@ -278,7 +288,7 @@ }, "input":{"shape":"AttachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

After you attach an EBS volume, you must make it available. For more information, see Making an EBS volume available for use.

If a volume has an AWS Marketplace product code:

  • The volume can be attached only to a stopped instance.

  • AWS Marketplace product codes are copied from the volume to the instance.

  • You must be subscribed to the product.

  • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

For more information, see Attaching Amazon EBS volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

After you attach an EBS volume, you must make it available. For more information, see Make an EBS volume available for use.

If a volume has an Amazon Web Services Marketplace product code:

  • The volume can be attached only to a stopped instance.

  • Amazon Web Services Marketplace product codes are copied from the volume to the instance.

  • You must be subscribed to the product.

  • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

For more information, see Attach an Amazon EBS volume to an instance in the Amazon Elastic Compute Cloud User Guide.

" }, "AttachVpnGateway":{ "name":"AttachVpnGateway", @@ -446,7 +456,7 @@ }, "input":{"shape":"CopySnapshotRequest"}, "output":{"shape":"CopySnapshotResult"}, - "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a snapshot within the same Region, from one Region to another, or from a Region to an Outpost. You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a different CMK. To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS snapshot in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a snapshot within the same Region, from one Region to another, or from a Region to an Outpost. You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default Key Management Service (KMS) KMS key; however, you can specify a different KMS key. To copy an encrypted snapshot that has been shared from another account, you must have permissions for the KMS key used to encrypt the snapshot.

Snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copy an Amazon EBS snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateCapacityReservation":{ "name":"CreateCapacityReservation", @@ -466,7 +476,7 @@ }, "input":{"shape":"CreateCarrierGatewayRequest"}, "output":{"shape":"CreateCarrierGatewayResult"}, - "documentation":"

Creates a carrier gateway. For more information about carrier gateways, see Carrier gateways in the AWS Wavelength Developer Guide.

" + "documentation":"

Creates a carrier gateway. For more information about carrier gateways, see Carrier gateways in the Amazon Web Services Wavelength Developer Guide.

" }, "CreateClientVpnEndpoint":{ "name":"CreateClientVpnEndpoint", @@ -506,7 +516,7 @@ }, "input":{"shape":"CreateDefaultSubnetRequest"}, "output":{"shape":"CreateDefaultSubnetResult"}, - "documentation":"

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a default subnet in the Amazon Virtual Private Cloud User Guide.

" }, "CreateDefaultVpc":{ "name":"CreateDefaultVpc", @@ -516,7 +526,7 @@ }, "input":{"shape":"CreateDefaultVpcRequest"}, "output":{"shape":"CreateDefaultVpcResult"}, - "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" + "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and default subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" }, "CreateDhcpOptions":{ "name":"CreateDhcpOptions", @@ -526,7 +536,7 @@ }, "input":{"shape":"CreateDhcpOptionsRequest"}, "output":{"shape":"CreateDhcpOptionsResult"}, - "documentation":"

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

  • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. To have your instance receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers to a custom DNS server.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, ExampleCompany.com). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

  • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

  • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. To have your instance receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers to a custom DNS server.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, ExampleCompany.com). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

  • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP options sets in the Amazon Virtual Private Cloud User Guide.

" }, "CreateEgressOnlyInternetGateway":{ "name":"CreateEgressOnlyInternetGateway", @@ -556,7 +566,7 @@ }, "input":{"shape":"CreateFlowLogsRequest"}, "output":{"shape":"CreateFlowLogsResult"}, - "documentation":"

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow Log Records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow log records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

" }, "CreateFpgaImage":{ "name":"CreateFpgaImage", @@ -578,6 +588,16 @@ "output":{"shape":"CreateImageResult"}, "documentation":"

Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

For more information, see Creating Amazon EBS-Backed Linux AMIs in the Amazon Elastic Compute Cloud User Guide.

" }, + "CreateInstanceEventWindow":{ + "name":"CreateInstanceEventWindow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceEventWindowRequest"}, + "output":{"shape":"CreateInstanceEventWindowResult"}, + "documentation":"

Creates an event window in which scheduled events for the associated Amazon EC2 instances can run.

You can define either a set of time ranges or a cron expression when creating the event window, but not both. All event window times are in UTC.

You can create up to 200 event windows per Amazon Web Services Region.

When you create the event window, targets (instance IDs, Dedicated Host IDs, or tags) are not yet associated with it. To ensure that the event window can be used, you must associate one or more targets with it by using the AssociateInstanceEventWindow API.

Event windows are applicable only for scheduled events that stop, reboot, or terminate instances.

Event windows are not applicable for:

  • Expedited scheduled events and network maintenance events.

  • Unscheduled maintenance such as AutoRecovery and unplanned reboots.

For more information, see Define event windows for scheduled events in the Amazon EC2 User Guide.

" + }, "CreateInstanceExportTask":{ "name":"CreateInstanceExportTask", "http":{ @@ -666,7 +686,7 @@ }, "input":{"shape":"CreateNatGatewayRequest"}, "output":{"shape":"CreateNatGatewayResult"}, - "documentation":"

Creates a NAT gateway in the specified subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. You can create either a public NAT gateway or a private NAT gateway.

With a public NAT gateway, internet-bound traffic from a private subnet can be routed to the NAT gateway, so that instances in a private subnet can connect to the internet.

With a private NAT gateway, private communication is routed across VPCs and on-premises networks through a transit gateway or virtual private gateway. Common use cases include running large workloads behind a small pool of allowlisted IPv4 addresses, preserving private IPv4 addresses, and communicating between overlapping networks.

For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a NAT gateway in the specified subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. You can create either a public NAT gateway or a private NAT gateway.

With a public NAT gateway, internet-bound traffic from a private subnet can be routed to the NAT gateway, so that instances in a private subnet can connect to the internet.

With a private NAT gateway, private communication is routed across VPCs and on-premises networks through a transit gateway or virtual private gateway. Common use cases include running large workloads behind a small pool of allowlisted IPv4 addresses, preserving private IPv4 addresses, and communicating between overlapping networks.

For more information, see NAT gateways in the Amazon Virtual Private Cloud User Guide.

" }, "CreateNetworkAcl":{ "name":"CreateNetworkAcl", @@ -715,7 +735,7 @@ }, "input":{"shape":"CreateNetworkInterfacePermissionRequest"}, "output":{"shape":"CreateNetworkInterfacePermissionResult"}, - "documentation":"

Grants an Amazon Web Services-authorized account permission to attach the specified network interface to an instance in their account.

You can grant permission to a single account only, and only one account at a time.

" + "documentation":"

Grants an Amazon Web Services-authorized account permission to attach the specified network interface to an instance in their account.

You can grant permission to a single Amazon Web Services account only, and only one account at a time.

" }, "CreatePlacementGroup":{ "name":"CreatePlacementGroup", @@ -735,7 +755,7 @@ }, "input":{"shape":"CreateReplaceRootVolumeTaskRequest"}, "output":{"shape":"CreateReplaceRootVolumeTaskResult"}, - "documentation":"

Creates a root volume replacement task for an Amazon EC2 instance. The root volume can either be restored to its initial launch state, or it can be restored using a specific snapshot.

For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a root volume replacement task for an Amazon EC2 instance. The root volume can either be restored to its initial launch state, or it can be restored using a specific snapshot.

For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateReservedInstancesListing":{ "name":"CreateReservedInstancesListing", @@ -765,7 +785,7 @@ }, "input":{"shape":"CreateRouteRequest"}, "output":{"shape":"CreateRouteResult"}, - "documentation":"

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, egress-only internet gateway, or transit gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

  • 192.0.2.0/24 (goes to some target A)

  • 192.0.2.0/28 (goes to some target B)

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, egress-only internet gateway, or transit gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

  • 192.0.2.0/24 (goes to some target A)

  • 192.0.2.0/28 (goes to some target B)

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route tables in the Amazon Virtual Private Cloud User Guide.

" }, "CreateRouteTable":{ "name":"CreateRouteTable", @@ -775,7 +795,7 @@ }, "input":{"shape":"CreateRouteTableRequest"}, "output":{"shape":"CreateRouteTableResult"}, - "documentation":"

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route tables in the Amazon Virtual Private Cloud User Guide.

" }, "CreateSecurityGroup":{ "name":"CreateSecurityGroup", @@ -795,7 +815,7 @@ }, "input":{"shape":"CreateSnapshotRequest"}, "output":{"shape":"Snapshot"}, - "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost.

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for Amazon EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateSnapshots":{ "name":"CreateSnapshots", @@ -815,7 +835,7 @@ }, "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, "output":{"shape":"CreateSpotDatafeedSubscriptionResult"}, - "documentation":"

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per Amazon Web Services account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances.

" }, "CreateStoreImageTask":{ "name":"CreateStoreImageTask", @@ -835,7 +855,17 @@ }, "input":{"shape":"CreateSubnetRequest"}, "output":{"shape":"CreateSubnetResult"}, - "documentation":"

Creates a subnet in a specified VPC.

You must specify an IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The allowed block size is between a /16 netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR block must not overlap with the CIDR block of an existing subnet in the VPC.

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

When you stop an instance in a subnet, it retains its private IPv4 address. It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a subnet in a specified VPC.

You must specify an IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The allowed block size is between a /16 netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR block must not overlap with the CIDR block of an existing subnet in the VPC.

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

Amazon Web Services reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

When you stop an instance in a subnet, it retains its private IPv4 address. It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and subnets in the Amazon Virtual Private Cloud User Guide.

" + }, + "CreateSubnetCidrReservation":{ + "name":"CreateSubnetCidrReservation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSubnetCidrReservationRequest"}, + "output":{"shape":"CreateSubnetCidrReservationResult"}, + "documentation":"

Creates a subnet CIDR reservation. For information about subnet CIDR reservations, see Subnet CIDR reservations in the Amazon Virtual Private Cloud User Guide.

" }, "CreateTags":{ "name":"CreateTags", @@ -984,7 +1014,7 @@ }, "input":{"shape":"CreateVolumeRequest"}, "output":{"shape":"Volume"}, - "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone.

You can create a new empty volume or restore a volume from an EBS snapshot. Any Amazon Web Services Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Create an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateVpc":{ "name":"CreateVpc", @@ -994,7 +1024,7 @@ }, "input":{"shape":"CreateVpcRequest"}, "output":{"shape":"CreateVpcResult"}, - "documentation":"

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an IPv6 CIDR block for the VPC. You can request an Amazon-provided IPv6 CIDR block from Amazon's pool of IPv6 addresses, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP).

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an IPv6 CIDR block for the VPC. You can request an Amazon-provided IPv6 CIDR block from Amazon's pool of IPv6 addresses, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP).

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP options sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateVpcEndpoint":{ "name":"CreateVpcEndpoint", @@ -1034,7 +1064,7 @@ }, "input":{"shape":"CreateVpcPeeringConnectionRequest"}, "output":{"shape":"CreateVpcPeeringConnectionResult"}, - "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

" + "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another Amazon Web Services account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

" }, "CreateVpnConnection":{ "name":"CreateVpnConnection", @@ -1153,6 +1183,16 @@ "output":{"shape":"DeleteFpgaImageResult"}, "documentation":"

Deletes the specified Amazon FPGA Image (AFI).

" }, + "DeleteInstanceEventWindow":{ + "name":"DeleteInstanceEventWindow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceEventWindowRequest"}, + "output":{"shape":"DeleteInstanceEventWindowResult"}, + "documentation":"

Deletes the specified event window.

For more information, see Define event windows for scheduled events in the Amazon EC2 User Guide.

" + }, "DeleteInternetGateway":{ "name":"DeleteInternetGateway", "http":{ @@ -1341,7 +1381,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteSnapshotRequest"}, - "documentation":"

Deletes the specified snapshot.

When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first de-register the AMI before you can delete the snapshot.

For more information, see Deleting an Amazon EBS snapshot in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Deletes the specified snapshot.

When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first deregister the AMI before you can delete the snapshot.

For more information, see Delete an Amazon EBS snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "DeleteSpotDatafeedSubscription":{ "name":"DeleteSpotDatafeedSubscription", @@ -1361,6 +1401,16 @@ "input":{"shape":"DeleteSubnetRequest"}, "documentation":"

Deletes the specified subnet. You must terminate all running instances in the subnet before you can delete the subnet.

" }, + "DeleteSubnetCidrReservation":{ + "name":"DeleteSubnetCidrReservation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSubnetCidrReservationRequest"}, + "output":{"shape":"DeleteSubnetCidrReservationResult"}, + "documentation":"

Deletes a subnet CIDR reservation.

" + }, "DeleteTags":{ "name":"DeleteTags", "http":{ @@ -1507,7 +1557,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteVolumeRequest"}, - "documentation":"

Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

The volume can remain in the deleting state for several minutes.

For more information, see Deleting an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

The volume can remain in the deleting state for several minutes.

For more information, see Delete an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" }, "DeleteVpc":{ "name":"DeleteVpc", @@ -1612,7 +1662,7 @@ }, "input":{"shape":"DeregisterInstanceEventNotificationAttributesRequest"}, "output":{"shape":"DeregisterInstanceEventNotificationAttributesResult"}, - "documentation":"

Deregisters tag keys to prevent tags that have the specified tag keys from being included in scheduled event notifications for resources in the Region.

" + "documentation":"

Deregisters tag keys to prevent tags that have the specified tag keys from being included in scheduled event notifications for resources in the Region.

" }, "DeregisterTransitGatewayMulticastGroupMembers":{ "name":"DeregisterTransitGatewayMulticastGroupMembers", @@ -1712,7 +1762,7 @@ }, "input":{"shape":"DescribeCapacityReservationsRequest"}, "output":{"shape":"DescribeCapacityReservationsResult"}, - "documentation":"

Describes one or more of your Capacity Reservations. The results describe only the Capacity Reservations in the Region that you're currently using.

" + "documentation":"

Describes one or more of your Capacity Reservations. The results describe only the Capacity Reservations in the Amazon Web Services Region that you're currently using.

" }, "DescribeCarrierGateways":{ "name":"DescribeCarrierGateways", @@ -1822,7 +1872,7 @@ }, "input":{"shape":"DescribeDhcpOptionsRequest"}, "output":{"shape":"DescribeDhcpOptionsResult"}, - "documentation":"

Describes one or more of your DHCP options sets.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your DHCP options sets.

For more information, see DHCP options sets in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeEgressOnlyInternetGateways":{ "name":"DescribeEgressOnlyInternetGateways", @@ -2064,6 +2114,16 @@ "output":{"shape":"DescribeInstanceEventNotificationAttributesResult"}, "documentation":"

Describes the tag keys that are registered to appear in scheduled event notifications for resources in the current Region.

" }, + "DescribeInstanceEventWindows":{ + "name":"DescribeInstanceEventWindows", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceEventWindowsRequest"}, + "output":{"shape":"DescribeInstanceEventWindowsResult"}, + "documentation":"

Describes the specified event windows or all event windows.

If you specify event window IDs, the output includes information for only the specified event windows. If you specify filters, the output includes information for only those event windows that meet the filter criteria. If you do not specify event windows IDs or filters, the output includes information for all event windows, which can affect performance. We recommend that you use pagination to ensure that the operation returns quickly and successfully.

For more information, see Define event windows for scheduled events in the Amazon EC2 User Guide.

" + }, "DescribeInstanceStatus":{ "name":"DescribeInstanceStatus", "http":{ @@ -2362,7 +2422,7 @@ }, "input":{"shape":"DescribeReplaceRootVolumeTasksRequest"}, "output":{"shape":"DescribeReplaceRootVolumeTasksResult"}, - "documentation":"

Describes a root volume replacement task. For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes a root volume replacement task. For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeReservedInstances":{ "name":"DescribeReservedInstances", @@ -2412,7 +2472,7 @@ }, "input":{"shape":"DescribeRouteTablesRequest"}, "output":{"shape":"DescribeRouteTablesResult"}, - "documentation":"

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route tables in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeScheduledInstanceAvailability":{ "name":"DescribeScheduledInstanceAvailability", @@ -2482,7 +2542,7 @@ }, "input":{"shape":"DescribeSnapshotsRequest"}, "output":{"shape":"DescribeSnapshotsResult"}, - "documentation":"

Describes the specified EBS snapshots available to you or all of the EBS snapshots available to you.

The snapshots available to you include public snapshots, private snapshots that you own, and private snapshots owned by other AWS accounts for which you have explicit create volume permissions.

The create volume permissions fall into the following categories:

  • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.

  • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.

  • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

The list of snapshots returned can be filtered by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.

For more information about EBS snapshots, see Amazon EBS snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified EBS snapshots available to you or all of the EBS snapshots available to you.

The snapshots available to you include public snapshots, private snapshots that you own, and private snapshots owned by other Amazon Web Services accounts for which you have explicit create volume permissions.

The create volume permissions fall into the following categories:

  • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All Amazon Web Services accounts have create volume permissions for these snapshots.

  • explicit: The owner of the snapshot granted create volume permissions to a specific Amazon Web Services account.

  • implicit: An Amazon Web Services account has implicit create volume permissions for all snapshots it owns.

The list of snapshots returned can be filtered by specifying snapshot IDs, snapshot owners, or Amazon Web Services accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the Amazon Web Services account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify Amazon Web Services account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.

For more information about EBS snapshots, see Amazon EBS snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeSpotDatafeedSubscription":{ "name":"DescribeSpotDatafeedSubscription", @@ -2572,7 +2632,7 @@ }, "input":{"shape":"DescribeSubnetsRequest"}, "output":{"shape":"DescribeSubnetsResult"}, - "documentation":"

Describes one or more of your subnets.

For more information, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your subnets.

For more information, see Your VPC and subnets in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeTags":{ "name":"DescribeTags", @@ -2722,7 +2782,7 @@ }, "input":{"shape":"DescribeVolumeStatusRequest"}, "output":{"shape":"DescribeVolumeStatusResult"}, - "documentation":"

Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

The DescribeVolumeStatus operation provides the following information about the specified volumes:

Status: Reflects the current status of the volume. The possible values are ok, impaired , warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks might still be taking place on your volume at the time. We recommend that you retry the request. For more information about volume status, see Monitoring the status of your volumes in the Amazon Elastic Compute Cloud User Guide.

Events: Reflect the cause of a volume status and might require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and might have inconsistent data.

Actions: Reflect the actions you might have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

" + "documentation":"

Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

The DescribeVolumeStatus operation provides the following information about the specified volumes:

Status: Reflects the current status of the volume. The possible values are ok, impaired, warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks might still be taking place on your volume at the time. We recommend that you retry the request. For more information about volume status, see Monitor the status of your volumes in the Amazon Elastic Compute Cloud User Guide.

Events: Reflect the cause of a volume status and might require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and might have inconsistent data.

Actions: Reflect the actions you might have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

" }, "DescribeVolumes":{ "name":"DescribeVolumes", @@ -2742,7 +2802,7 @@ }, "input":{"shape":"DescribeVolumesModificationsRequest"}, "output":{"shape":"DescribeVolumesModificationsResult"}, - "documentation":"

Describes the most recent volume modification request for the specified EBS volumes.

If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring volume modifications in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the most recent volume modification request for the specified EBS volumes.

If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitor the progress of volume modifications in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVpcAttribute":{ "name":"DescribeVpcAttribute", @@ -2910,7 +2970,7 @@ }, "input":{"shape":"DetachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

For more information, see Detaching an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

When a volume with an Amazon Web Services Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

For more information, see Detach an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" }, "DetachVpnGateway":{ "name":"DetachVpnGateway", @@ -3039,6 +3099,16 @@ "output":{"shape":"DisassociateIamInstanceProfileResult"}, "documentation":"

Disassociates an IAM instance profile from a running or stopped instance.

Use DescribeIamInstanceProfileAssociations to get the association ID.

" }, + "DisassociateInstanceEventWindow":{ + "name":"DisassociateInstanceEventWindow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateInstanceEventWindowRequest"}, + "output":{"shape":"DisassociateInstanceEventWindowResult"}, + "documentation":"

Disassociates one or more targets from an event window.

For more information, see Define event windows for scheduled events in the Amazon EC2 User Guide.

" + }, "DisassociateRouteTable":{ "name":"DisassociateRouteTable", "http":{ @@ -3046,7 +3116,7 @@ "requestUri":"/" }, "input":{"shape":"DisassociateRouteTableRequest"}, - "documentation":"

Disassociates a subnet or gateway from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Disassociates a subnet or gateway from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route tables in the Amazon Virtual Private Cloud User Guide.

" }, "DisassociateSubnetCidrBlock":{ "name":"DisassociateSubnetCidrBlock", @@ -3106,7 +3176,7 @@ }, "input":{"shape":"EnableEbsEncryptionByDefaultRequest"}, "output":{"shape":"EnableEbsEncryptionByDefaultResult"}, - "documentation":"

Enables EBS encryption by default for your account in the current Region.

After you enable encryption by default, the EBS volumes that you create are always encrypted, either using the default CMK or the CMK that you specified when you created each volume. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

Enabling encryption by default has no effect on the encryption status of your existing volumes.

After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see Supported instance types.

" + "documentation":"

Enables EBS encryption by default for your account in the current Region.

After you enable encryption by default, the EBS volumes that you create are always encrypted, either using the default KMS key or the KMS key that you specified when you created each volume. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

You can specify the default KMS key for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

Enabling encryption by default has no effect on the encryption status of your existing volumes.

After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see Supported instance types.

" }, "EnableFastSnapshotRestores":{ "name":"EnableFastSnapshotRestores", @@ -3254,7 +3324,7 @@ }, "input":{"shape":"GetCapacityReservationUsageRequest"}, "output":{"shape":"GetCapacityReservationUsageResult"}, - "documentation":"

Gets usage information about a Capacity Reservation. If the Capacity Reservation is shared, it shows usage information for the Capacity Reservation owner and each account that is currently using the shared capacity. If the Capacity Reservation is not shared, it shows only the Capacity Reservation owner's usage.

" + "documentation":"

Gets usage information about a Capacity Reservation. If the Capacity Reservation is shared, it shows usage information for the Capacity Reservation owner and each Amazon Web Services account that is currently using the shared capacity. If the Capacity Reservation is not shared, it shows only the Capacity Reservation owner's usage.

" }, "GetCoipPoolUsage":{ "name":"GetCoipPoolUsage", @@ -3304,7 +3374,7 @@ }, "input":{"shape":"GetEbsDefaultKmsKeyIdRequest"}, "output":{"shape":"GetEbsDefaultKmsKeyIdResult"}, - "documentation":"

Describes the default customer master key (CMK) for EBS encryption by default for your account in this Region. You can change the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the default KMS key for EBS encryption by default for your account in this Region. You can change the default KMS key for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "GetEbsEncryptionByDefault":{ "name":"GetEbsEncryptionByDefault", @@ -3406,6 +3476,16 @@ "output":{"shape":"GetSerialConsoleAccessStatusResult"}, "documentation":"

Retrieves the access status of your account to the EC2 serial console of all instances. By default, access to the EC2 serial console is disabled for your account. For more information, see Manage account access to the EC2 serial console in the Amazon EC2 User Guide.

" }, + "GetSubnetCidrReservations":{ + "name":"GetSubnetCidrReservations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSubnetCidrReservationsRequest"}, + "output":{"shape":"GetSubnetCidrReservationsResult"}, + "documentation":"

Gets information about the subnet CIDR reservations.

" + }, "GetTransitGatewayAttachmentPropagations":{ "name":"GetTransitGatewayAttachmentPropagations", "http":{ @@ -3564,7 +3644,7 @@ }, "input":{"shape":"ModifyDefaultCreditSpecificationRequest"}, "output":{"shape":"ModifyDefaultCreditSpecificationResult"}, - "documentation":"

Modifies the default credit option for CPU usage of burstable performance instances. The default credit option is set at the account level per Region, and is specified per instance family. All new burstable performance instances in the account launch using the default credit option.

ModifyDefaultCreditSpecification is an asynchronous operation, which works at a Region level and modifies the credit option for each Availability Zone. All zones in a Region are updated within five minutes. But if instances are launched during this operation, they might not get the new credit option until the zone is updated. To verify whether the update has occurred, you can call GetDefaultCreditSpecification and check DefaultCreditSpecification for updates.

For more information, see Burstable performance instances in the Amazon EC2 User Guide.

" + "documentation":"

Modifies the default credit option for CPU usage of burstable performance instances. The default credit option is set at the account level per Amazon Web Services Region, and is specified per instance family. All new burstable performance instances in the account launch using the default credit option.

ModifyDefaultCreditSpecification is an asynchronous operation, which works at an Amazon Web Services Region level and modifies the credit option for each Availability Zone. All zones in a Region are updated within five minutes. But if instances are launched during this operation, they might not get the new credit option until the zone is updated. To verify whether the update has occurred, you can call GetDefaultCreditSpecification and check DefaultCreditSpecification for updates.

For more information, see Burstable performance instances in the Amazon EC2 User Guide.

" }, "ModifyEbsDefaultKmsKeyId":{ "name":"ModifyEbsDefaultKmsKeyId", @@ -3574,7 +3654,7 @@ }, "input":{"shape":"ModifyEbsDefaultKmsKeyIdRequest"}, "output":{"shape":"ModifyEbsDefaultKmsKeyIdResult"}, - "documentation":"

Changes the default customer master key (CMK) for EBS encryption by default for your account in this Region.

AWS creates a unique AWS managed CMK in each Region for use with encryption by default. If you change the default CMK to a symmetric customer managed CMK, it is used instead of the AWS managed CMK. To reset the default CMK to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does not support asymmetric CMKs.

If you delete or disable the customer managed CMK that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Changes the default KMS key for EBS encryption by default for your account in this Region.

Amazon Web Services creates a unique Amazon Web Services managed KMS key in each Region for use with encryption by default. If you change the default KMS key to a symmetric customer managed KMS key, it is used instead of the Amazon Web Services managed KMS key. To reset the default KMS key to the Amazon Web Services managed KMS key for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does not support asymmetric KMS keys.

If you delete or disable the customer managed KMS key that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyFleet":{ "name":"ModifyFleet", @@ -3672,6 +3752,16 @@ "output":{"shape":"ModifyInstanceEventStartTimeResult"}, "documentation":"

Modifies the start time for a scheduled Amazon EC2 instance event.

" }, + "ModifyInstanceEventWindow":{ + "name":"ModifyInstanceEventWindow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceEventWindowRequest"}, + "output":{"shape":"ModifyInstanceEventWindowResult"}, + "documentation":"

Modifies the specified event window.

You can define either a set of time ranges or a cron expression when modifying the event window, but not both.

To modify the targets associated with the event window, use the AssociateInstanceEventWindow and DisassociateInstanceEventWindow API.

If Amazon Web Services has already scheduled an event, modifying an event window won't change the time of the scheduled event.

For more information, see Define event windows for scheduled events in the Amazon EC2 User Guide.

" + }, "ModifyInstanceMetadataOptions":{ "name":"ModifyInstanceMetadataOptions", "http":{ @@ -3748,7 +3838,7 @@ "requestUri":"/" }, "input":{"shape":"ModifySnapshotAttributeRequest"}, - "documentation":"

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single operation. If you need to both add and remove account IDs for a snapshot, you must use multiple operations. You can make up to 500 modifications to a snapshot in a single operation.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Sharing snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Adds or removes permission settings for the specified snapshot. You may add or remove specified Amazon Web Services account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single operation. If you need to both add and remove account IDs for a snapshot, you must use multiple operations. You can make up to 500 modifications to a snapshot in a single operation.

Encrypted snapshots and snapshots with Amazon Web Services Marketplace product codes cannot be made public. Snapshots encrypted with your default KMS key cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Share a snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifySpotFleetRequest":{ "name":"ModifySpotFleetRequest", @@ -3837,7 +3927,7 @@ }, "input":{"shape":"ModifyVolumeRequest"}, "output":{"shape":"ModifyVolumeResult"}, - "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you might be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the size, IOPS, or type of an EBS volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the size, IOPS, or type of an EBS volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux file system. For information about extending a Windows file system, see Extending a Windows file system.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using DescribeVolumesModifications. For information about tracking status changes using either method, see Monitoring volume modifications.

With previous-generation instance types, resizing an EBS volume might require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Amazon EBS Elastic Volumes (Linux) or Amazon EBS Elastic Volumes (Windows).

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

" + "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you might be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying EBS volumes, see Amazon EBS Elastic Volumes (Linux instances) or Amazon EBS Elastic Volumes (Windows instances).

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For more information, see Extend a Linux file system or Extend a Windows file system.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using DescribeVolumesModifications. For information about tracking status changes using either method, see Monitor the progress of volume modifications.

With previous-generation instance types, resizing an EBS volume might require detaching and reattaching the volume or stopping and restarting the instance.

If you reach the maximum volume modification rate per volume limit, you must wait at least six hours before applying further modifications to the affected EBS volume.

" }, "ModifyVolumeAttribute":{ "name":"ModifyVolumeAttribute", @@ -3905,7 +3995,7 @@ }, "input":{"shape":"ModifyVpcPeeringConnectionOptionsRequest"}, "output":{"shape":"ModifyVpcPeeringConnectionOptionsResult"}, - "documentation":"

Modifies the VPC peering connection options on one side of a VPC peering connection. You can do the following:

  • Enable/disable communication over the peering connection between an EC2-Classic instance that's linked to your VPC (using ClassicLink) and instances in the peer VPC.

  • Enable/disable communication over the peering connection between instances in your VPC and an EC2-Classic instance that's linked to the peer VPC.

  • Enable/disable the ability to resolve public DNS hostnames to private IP addresses when queried from instances in the peer VPC.

If the peered VPCs are in the same AWS account, you can enable DNS resolution for queries from the local VPC. This ensures that queries from the local VPC resolve to private IP addresses in the peer VPC. This option is not available if the peered VPCs are in different AWS accounts or different Regions. For peered VPCs in different AWS accounts, each AWS account owner must initiate a separate request to modify the peering connection options. For inter-region peering connections, you must use the Region for the requester VPC to modify the requester VPC peering options and the Region for the accepter VPC to modify the accepter VPC peering options. To verify which VPCs are the accepter and the requester for a VPC peering connection, use the DescribeVpcPeeringConnections command.

" + "documentation":"

Modifies the VPC peering connection options on one side of a VPC peering connection. You can do the following:

  • Enable/disable communication over the peering connection between an EC2-Classic instance that's linked to your VPC (using ClassicLink) and instances in the peer VPC.

  • Enable/disable communication over the peering connection between instances in your VPC and an EC2-Classic instance that's linked to the peer VPC.

  • Enable/disable the ability to resolve public DNS hostnames to private IP addresses when queried from instances in the peer VPC.

If the peered VPCs are in the same Amazon Web Services account, you can enable DNS resolution for queries from the local VPC. This ensures that queries from the local VPC resolve to private IP addresses in the peer VPC. This option is not available if the peered VPCs are in different Amazon Web Services accounts or different Regions. For peered VPCs in different Amazon Web Services accounts, each Amazon Web Services account owner must initiate a separate request to modify the peering connection options. For inter-region peering connections, you must use the Region for the requester VPC to modify the requester VPC peering options and the Region for the accepter VPC to modify the accepter VPC peering options. To verify which VPCs are the accepter and the requester for a VPC peering connection, use the DescribeVpcPeeringConnections command.

" }, "ModifyVpcTenancy":{ "name":"ModifyVpcTenancy", @@ -4123,7 +4213,7 @@ "requestUri":"/" }, "input":{"shape":"ReleaseAddressRequest"}, - "documentation":"

Releases the specified Elastic IP address.

[EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

[Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

After releasing an Elastic IP address, it is released to the IP address pool. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another account.

[EC2-VPC] After you release an Elastic IP address for use in a VPC, you might be able to recover it. For more information, see AllocateAddress.

" + "documentation":"

Releases the specified Elastic IP address.

[EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

[Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

After releasing an Elastic IP address, it is released to the IP address pool. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another Amazon Web Services account.

[EC2-VPC] After you release an Elastic IP address for use in a VPC, you might be able to recover it. For more information, see AllocateAddress.

" }, "ReleaseHosts":{ "name":"ReleaseHosts", @@ -4171,7 +4261,7 @@ "requestUri":"/" }, "input":{"shape":"ReplaceRouteRequest"}, - "documentation":"

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway, virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, egress-only internet gateway, or transit gateway.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway, virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, egress-only internet gateway, or transit gateway.

For more information, see Route tables in the Amazon Virtual Private Cloud User Guide.

" }, "ReplaceRouteTableAssociation":{ "name":"ReplaceRouteTableAssociation", @@ -4181,7 +4271,7 @@ }, "input":{"shape":"ReplaceRouteTableAssociationRequest"}, "output":{"shape":"ReplaceRouteTableAssociationResult"}, - "documentation":"

Changes the route table associated with a given subnet, internet gateway, or virtual private gateway in a VPC. After the operation completes, the subnet or gateway uses the routes in the new route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

You can also use this operation to change which table is the main route table in the VPC. Specify the main route table's association ID and the route table ID of the new main route table.

" + "documentation":"

Changes the route table associated with a given subnet, internet gateway, or virtual private gateway in a VPC. After the operation completes, the subnet or gateway uses the routes in the new route table. For more information about route tables, see Route tables in the Amazon Virtual Private Cloud User Guide.

You can also use this operation to change which table is the main route table in the VPC. Specify the main route table's association ID and the route table ID of the new main route table.

" }, "ReplaceTransitGatewayRoute":{ "name":"ReplaceTransitGatewayRoute", @@ -4240,7 +4330,7 @@ }, "input":{"shape":"ResetEbsDefaultKmsKeyIdRequest"}, "output":{"shape":"ResetEbsDefaultKmsKeyIdResult"}, - "documentation":"

Resets the default customer master key (CMK) for EBS encryption for your account in this Region to the AWS managed CMK for EBS.

After resetting the default CMK to the AWS managed CMK, you can continue to encrypt by a customer managed CMK by specifying it when you create the volume. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Resets the default KMS key for EBS encryption for your account in this Region to the Amazon Web Services managed KMS key for EBS.

After resetting the default KMS key to the Amazon Web Services managed KMS key, you can continue to encrypt using a customer managed KMS key by specifying it when you create the volume. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "ResetFpgaImageAttribute":{ "name":"ResetFpgaImageAttribute", @@ -4286,7 +4376,7 @@ "requestUri":"/" }, "input":{"shape":"ResetSnapshotAttributeRequest"}, - "documentation":"

Resets permission settings for the specified snapshot.

For more information about modifying snapshot permissions, see Sharing snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Resets permission settings for the specified snapshot.

For more information about modifying snapshot permissions, see Share a snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "RestoreAddressToClassic":{ "name":"RestoreAddressToClassic", @@ -4465,7 +4555,7 @@ }, "input":{"shape":"UnassignIpv6AddressesRequest"}, "output":{"shape":"UnassignIpv6AddressesResult"}, - "documentation":"

Unassigns one or more IPv6 addresses from a network interface.

" + "documentation":"

Unassigns one or more IPv6 addresses or IPv6 Prefix Delegation prefixes from a network interface.

" }, "UnassignPrivateIpAddresses":{ "name":"UnassignPrivateIpAddresses", @@ -4474,7 +4564,7 @@ "requestUri":"/" }, "input":{"shape":"UnassignPrivateIpAddressesRequest"}, - "documentation":"

Unassigns one or more secondary private IP addresses from a network interface.

" + "documentation":"

Unassigns one or more secondary private IP addresses, or IPv4 Prefix Delegation prefixes from a network interface.

" }, "UnmonitorInstances":{ "name":"UnmonitorInstances", @@ -4838,7 +4928,7 @@ }, "NetworkInterfaceOwnerId":{ "shape":"String", - "documentation":"

The ID of the account that owns the network interface.

", + "documentation":"

The ID of the Amazon Web Services account that owns the network interface.

", "locationName":"networkInterfaceOwnerId" }, "PrivateIpAddress":{ @@ -5327,7 +5417,7 @@ }, "DestinationPrefixListId":{ "shape":"String", - "documentation":"

The prefix of the AWS service.

", + "documentation":"

The prefix of the Amazon Web Service.

", "locationName":"destinationPrefixListId" }, "EgressOnlyInternetGatewayId":{ @@ -5505,6 +5595,15 @@ "documentation":"

One or more specific IPv6 addresses to be assigned to the network interface. You can't use this option if you're specifying a number of IPv6 addresses.

", "locationName":"ipv6Addresses" }, + "Ipv6PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv6 Prefix Delegation prefixes that AWS automatically assigns to the network interface. You cannot use this option if you use the Ipv6Prefixes option.

" + }, + "Ipv6Prefixes":{ + "shape":"IpPrefixList", + "documentation":"

One or more IPv6 Prefix Delegation prefixes assigned to the network interface. You cannot use this option if you use the Ipv6PrefixCount option.

", + "locationName":"Ipv6Prefix" + }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", "documentation":"

The ID of the network interface.

", @@ -5520,6 +5619,11 @@ "documentation":"

The new IPv6 addresses assigned to the network interface. Existing IPv6 addresses that were assigned to the network interface before the request are not included.

", "locationName":"assignedIpv6Addresses" }, + "AssignedIpv6Prefixes":{ + "shape":"IpPrefixList", + "documentation":"

The IPv6 Prefix Delegation prefixes that are assigned to the network interface.

", + "locationName":"assignedIpv6PrefixSet" + }, "NetworkInterfaceId":{ "shape":"String", "documentation":"

The ID of the network interface.

", @@ -5550,6 +5654,15 @@ "shape":"Integer", "documentation":"

The number of secondary IP addresses to assign to the network interface. You can't specify this parameter when also specifying private IP addresses.

", "locationName":"secondaryPrivateIpAddressCount" + }, + "Ipv4Prefixes":{ + "shape":"IpPrefixList", + "documentation":"

One or more IPv4 Prefix Delegation prefixes assigned to the network interface. You cannot use this option if you use the Ipv4PrefixCount option.

", + "locationName":"Ipv4Prefix" + }, + "Ipv4PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv4 Prefix Delegation prefixes that AWS automatically assigns to the network interface. You cannot use this option if you use the Ipv4Prefixes option.

" } }, "documentation":"

Contains the parameters for AssignPrivateIpAddresses.

" @@ -5566,6 +5679,11 @@ "shape":"AssignedPrivateIpAddressList", "documentation":"

The private IP addresses assigned to the network interface.

", "locationName":"assignedPrivateIpAddressesSet" + }, + "AssignedIpv4Prefixes":{ + "shape":"Ipv4PrefixesList", + "documentation":"

The IPv4 Prefix Delegation prefixes that are assigned to the network interface.

", + "locationName":"assignedIpv4PrefixSet" } } }, @@ -5761,6 +5879,37 @@ } } }, + "AssociateInstanceEventWindowRequest":{ + "type":"structure", + "required":[ + "InstanceEventWindowId", + "AssociationTarget" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "InstanceEventWindowId":{ + "shape":"InstanceEventWindowId", + "documentation":"

The ID of the event window.

" + }, + "AssociationTarget":{ + "shape":"InstanceEventWindowAssociationRequest", + "documentation":"

One or more targets associated with the specified event window.

" + } + } + }, + "AssociateInstanceEventWindowResult":{ + "type":"structure", + "members":{ + "InstanceEventWindow":{ + "shape":"InstanceEventWindow", + "documentation":"

Information about the event window.

", + "locationName":"instanceEventWindow" + } + } + }, "AssociateRouteTableRequest":{ "type":"structure", "required":["RouteTableId"], @@ -7319,7 +7468,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the account that owns the Capacity Reservation.

", + "documentation":"

The ID of the Amazon Web Services account that owns the Capacity Reservation.

", "locationName":"ownerId" }, "CapacityReservationArn":{ @@ -7349,7 +7498,7 @@ }, "Tenancy":{ "shape":"CapacityReservationTenancy", - "documentation":"

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

  • default - The Capacity Reservation is created on hardware that is shared with other accounts.

  • dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single account.

", + "documentation":"

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

  • default - The Capacity Reservation is created on hardware that is shared with other Amazon Web Services accounts.

  • dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single Amazon Web Services account.

", "locationName":"tenancy" }, "TotalInstanceCount":{ @@ -7425,7 +7574,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the account that owns the resource group.

", + "documentation":"

The ID of the Amazon Web Services account that owns the resource group.

", "locationName":"ownerId" } }, @@ -7594,7 +7743,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The AWS account ID of the owner of the carrier gateway.

", + "documentation":"

The Amazon Web Services account ID of the owner of the carrier gateway.

", "locationName":"ownerId" }, "Tags":{ @@ -7668,7 +7817,7 @@ "documentation":"

The signed authorization message for the prefix and account.

" } }, - "documentation":"

Provides authorization for Amazon to bring a specific IP address range to a specific account using bring your own IP addresses (BYOIP). For more information, see Configuring your BYOIP address range in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Provides authorization for Amazon to bring a specific IP address range to a specific Amazon Web Services account using bring your own IP addresses (BYOIP). For more information, see Configuring your BYOIP address range in the Amazon Elastic Compute Cloud User Guide.

" }, "CidrBlock":{ "type":"structure", @@ -8416,7 +8565,7 @@ "members":{ "OwnerId":{ "shape":"String", - "documentation":"

The account ID of the instance owner. This is only present if the product code is attached to the instance.

", + "documentation":"

The Amazon Web Services account ID of the instance owner. This is only present if the product code is attached to the instance.

", "locationName":"ownerId" }, "Return":{ @@ -8715,11 +8864,11 @@ }, "DestinationOutpostArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Outpost to which to copy the snapshot. Only specify this parameter when copying a snapshot from an AWS Region to an Outpost. The snapshot must be in the Region for the destination Outpost. You cannot copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

For more information, see Copying snapshots from an AWS Region to an Outpost in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the Outpost to which to copy the snapshot. Only specify this parameter when copying a snapshot from an Amazon Web Services Region to an Outpost. The snapshot must be in the Region for the destination Outpost. You cannot copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

For more information, see Copy snapshots from an Amazon Web Services Region to an Outpost in the Amazon Elastic Compute Cloud User Guide.

" }, "DestinationRegion":{ "shape":"String", - "documentation":"

The destination Region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination Region in a PresignedUrl parameter, where it is required.

The snapshot copy is sent to the regional endpoint that you sent the HTTP request to (for example, ec2.us-east-1.amazonaws.com). With the AWS CLI, this is specified using the --region parameter or the default Region in your AWS configuration file.

", + "documentation":"

The destination Region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination Region in a PresignedUrl parameter, where it is required.

The snapshot copy is sent to the regional endpoint that you sent the HTTP request to (for example, ec2.us-east-1.amazonaws.com). With the CLI, this is specified using the --region parameter or the default Region in your Amazon Web Services configuration file.

", "locationName":"destinationRegion" }, "Encrypted":{ @@ -8729,12 +8878,12 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

", + "documentation":"

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

", "locationName":"kmsKeyId" }, "PresignedUrl":{ "shape":"String", - "documentation":"

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests: Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", + "documentation":"

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using Amazon Web Services Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", "locationName":"presignedUrl" }, "SourceRegion":{ @@ -8844,7 +8993,7 @@ }, "Tenancy":{ "shape":"CapacityReservationTenancy", - "documentation":"

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

  • default - The Capacity Reservation is created on hardware that is shared with other accounts.

  • dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single account.

" + "documentation":"

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

  • default - The Capacity Reservation is created on hardware that is shared with other Amazon Web Services accounts.

  • dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single Amazon Web Services account.

" }, "InstanceCount":{ "shape":"Integer", @@ -8913,7 +9062,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true } } @@ -9204,7 +9353,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" }, "DryRun":{ "shape":"Boolean", @@ -9407,7 +9556,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" }, "DeliverLogsPermissionArn":{ "shape":"String", @@ -9440,7 +9589,7 @@ }, "LogFormat":{ "shape":"String", - "documentation":"

The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see Flow Log Records. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field.

Specify the fields using the ${field-id} format, separated by spaces. For the AWS CLI, use single quotation marks (' ') to surround the parameter value.

" + "documentation":"

The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see Flow log records. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field.

Specify the fields using the ${field-id} format, separated by spaces. For the CLI, use single quotation marks (' ') to surround the parameter value.

" }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -9577,6 +9726,43 @@ } } }, + "CreateInstanceEventWindowRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the event window.

" + }, + "TimeRanges":{ + "shape":"InstanceEventWindowTimeRangeRequestSet", + "documentation":"

The time range for the event window. If you specify a time range, you can't specify a cron expression.

", + "locationName":"TimeRange" + }, + "CronExpression":{ + "shape":"InstanceEventWindowCronExpression", + "documentation":"

The cron expression for the event window, for example, * 0-4,20-23 * * 1,5. If you specify a cron expression, you can't specify a time range.

Constraints:

  • Only hour and day of the week values are supported.

  • For day of the week values, you can specify either integers 0 through 6, or alternative single values SUN through SAT.

  • The minute, month, and year must be specified by *.

  • The hour value must be one or a multiple range, for example, 0-4 or 0-4,20-23.

  • Each hour range must be >= 2 hours, for example, 0-2 or 20-23.

  • The event window must be >= 4 hours. The combined total time ranges in the event window must be >= 4 hours.

For more information about cron expressions, see cron on the Wikipedia website.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the event window.

", + "locationName":"TagSpecification" + } + } + }, + "CreateInstanceEventWindowResult":{ + "type":"structure", + "members":{ + "InstanceEventWindow":{ + "shape":"InstanceEventWindow", + "documentation":"

Information about the event window.

", + "locationName":"instanceEventWindow" + } + } + }, "CreateInstanceExportTaskRequest":{ "type":"structure", "required":[ @@ -9898,7 +10084,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

Constraint: Maximum 64 ASCII characters.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

Constraint: Maximum 64 ASCII characters.

", "idempotencyToken":true }, "DryRun":{ @@ -10039,19 +10225,19 @@ "members":{ "SourceIp":{ "shape":"IpAddress", - "documentation":"

The IP address of the AWS resource that is the source of the path.

" + "documentation":"

The IP address of the Amazon Web Services resource that is the source of the path.

" }, "DestinationIp":{ "shape":"IpAddress", - "documentation":"

The IP address of the AWS resource that is the destination of the path.

" + "documentation":"

The IP address of the Amazon Web Services resource that is the destination of the path.

" }, "Source":{ "shape":"String", - "documentation":"

The AWS resource that is the source of the path.

" + "documentation":"

The Amazon Web Services resource that is the source of the path.

" }, "Destination":{ "shape":"String", - "documentation":"

The AWS resource that is the destination of the path.

" + "documentation":"

The Amazon Web Services resource that is the destination of the path.

" }, "Protocol":{ "shape":"Protocol", @@ -10072,7 +10258,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true } } @@ -10100,7 +10286,7 @@ }, "AwsAccountId":{ "shape":"String", - "documentation":"

The account ID.

" + "documentation":"

The Amazon Web Services account ID.

" }, "AwsService":{ "shape":"String", @@ -10172,6 +10358,24 @@ "documentation":"

The number of secondary private IPv4 addresses to assign to a network interface. When you specify a number of secondary IPv4 addresses, Amazon EC2 selects these IP addresses within the subnet's IPv4 CIDR range. You can't specify this option and specify more than one private IP address using privateIpAddresses.

The number of IP addresses you can assign to a network interface varies by instance type. For more information, see IP Addresses Per ENI Per Instance Type in the Amazon Virtual Private Cloud User Guide.

", "locationName":"secondaryPrivateIpAddressCount" }, + "Ipv4Prefixes":{ + "shape":"Ipv4PrefixList", + "documentation":"

One or more IPv4 Prefix Delegation prefixes assigned to the network interface. You cannot use this option if you use the Ipv4PrefixCount option.

", + "locationName":"Ipv4Prefix" + }, + "Ipv4PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv4 Prefix Delegation prefixes that Amazon Web Services automatically assigns to the network interface. You cannot use this option if you use the Ipv4Prefixes option.

" + }, + "Ipv6Prefixes":{ + "shape":"Ipv6PrefixList", + "documentation":"

One or more IPv6 Prefix Delegation prefixes assigned to the network interface. You cannot use this option if you use the Ipv6PrefixCount option.

", + "locationName":"Ipv6Prefix" + }, + "Ipv6PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv6 Prefix Delegation prefixes that Amazon Web Services automatically assigns to the network interface. You cannot use this option if you use the Ipv6Prefixes option.

" + }, "InterfaceType":{ "shape":"NetworkInterfaceCreationType", "documentation":"

Indicates the type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide. To create a trunk network interface, specify trunk. For more information, see Network interface trunking in the Amazon Elastic Compute Cloud User Guide.

" @@ -10262,7 +10466,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency. For more information, see Ensuring Idempotency.

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency. For more information, see Ensuring idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -10543,11 +10747,11 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Outpost on which to create a local snapshot.

  • To create a snapshot of a volume in a Region, omit this parameter. The snapshot is created in the same Region as the volume.

  • To create a snapshot of a volume on an Outpost and store the snapshot in the Region, omit this parameter. The snapshot is created in the Region for the Outpost.

  • To create a snapshot of a volume on an Outpost and store the snapshot on an Outpost, specify the ARN of the destination Outpost. The snapshot must be created on the same Outpost as the volume.

For more information, see Creating local snapshots from volumes on an Outpost in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the Outpost on which to create a local snapshot.

  • To create a snapshot of a volume in a Region, omit this parameter. The snapshot is created in the same Region as the volume.

  • To create a snapshot of a volume on an Outpost and store the snapshot in the Region, omit this parameter. The snapshot is created in the Region for the Outpost.

  • To create a snapshot of a volume on an Outpost and store the snapshot on an Outpost, specify the ARN of the destination Outpost. The snapshot must be created on the same Outpost as the volume.

For more information, see Create local snapshots from volumes on an Outpost in the Amazon Elastic Compute Cloud User Guide.

" }, "VolumeId":{ "shape":"VolumeId", - "documentation":"

The ID of the EBS volume.

" + "documentation":"

The ID of the Amazon EBS volume.

" }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -10575,7 +10779,7 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Outpost on which to create the local snapshots.

  • To create snapshots from an instance in a Region, omit this parameter. The snapshots are created in the same Region as the instance.

  • To create snapshots from an instance on an Outpost and store the snapshots in the Region, omit this parameter. The snapshots are created in the Region for the Outpost.

  • To create snapshots from an instance on an Outpost and store the snapshots on an Outpost, specify the ARN of the destination Outpost. The snapshots must be created on the same Outpost as the instance.

For more information, see Creating multi-volume local snapshots from instances on an Outpost in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the Outpost on which to create the local snapshots.

  • To create snapshots from an instance in a Region, omit this parameter. The snapshots are created in the same Region as the instance.

  • To create snapshots from an instance on an Outpost and store the snapshots in the Region, omit this parameter. The snapshots are created in the Region for the Outpost.

  • To create snapshots from an instance on an Outpost and store the snapshots on an Outpost, specify the ARN of the destination Outpost. The snapshots must be created on the same Outpost as the instance.

For more information, see Create multi-volume local snapshots from instances on an Outpost in the Amazon Elastic Compute Cloud User Guide.

" }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -10671,11 +10875,56 @@ } } }, + "CreateSubnetCidrReservationRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "Cidr", + "ReservationType" + ], + "members":{ + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the subnet CIDR reservation.

", + "locationName":"TagSpecification" + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

The ID of the subnet.

" + }, + "Cidr":{ + "shape":"String", + "documentation":"

The IPv4 or IPv6 CIDR range to reserve.

" + }, + "ReservationType":{ + "shape":"SubnetCidrReservationType", + "documentation":"

The type of reservation.

The following are valid values:

  • prefix: The Amazon EC2 Prefix Delegation feature assigns the IP addresses to network interfaces that are associated with an instance. For information about Prefix Delegation, see Prefix Delegation for Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

  • explicit: You manually assign the IP addresses to resources that reside in your subnet.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The description to assign to the subnet CIDR reservation.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "CreateSubnetCidrReservationResult":{ + "type":"structure", + "members":{ + "SubnetCidrReservation":{ + "shape":"SubnetCidrReservation", + "documentation":"

Information about the created subnet CIDR reservation.

", + "locationName":"subnetCidrReservation" + } + } + }, "CreateSubnetRequest":{ "type":"structure", "required":[ - "CidrBlock", - "VpcId" + "VpcId", + "CidrBlock" ], "members":{ "TagSpecifications":{ @@ -10685,16 +10934,12 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone or Local Zone for the subnet.

Default: AWS selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet.

To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Available Regions in the Amazon Elastic Compute Cloud User Guide.

To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN.

" + "documentation":"

The Availability Zone or Local Zone for the subnet.

Default: Amazon Web Services selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet.

To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Available Regions in the Amazon Elastic Compute Cloud User Guide.

To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN.

" }, "AvailabilityZoneId":{ "shape":"String", "documentation":"

The AZ ID or the Local Zone ID of the subnet.

" }, - "CidrBlock":{ - "shape":"String", - "documentation":"

The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

" - }, "Ipv6CidrBlock":{ "shape":"String", "documentation":"

The IPv6 network range for the subnet, in CIDR notation. The subnet size must use a /64 prefix length.

" @@ -10711,6 +10956,10 @@ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" + }, + "CidrBlock":{ + "shape":"String", + "documentation":"

The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

" } } }, @@ -11380,7 +11629,7 @@ }, "UserId":{ "shape":"String", - "documentation":"

The AWS account ID to be added or removed.

", + "documentation":"

The ID of the Amazon Web Services account to be added or removed.

", "locationName":"userId" } }, @@ -11398,11 +11647,11 @@ "members":{ "Add":{ "shape":"CreateVolumePermissionList", - "documentation":"

Adds the specified AWS account ID or group to the list.

" + "documentation":"

Adds the specified Amazon Web Services account ID or group to the list.

" }, "Remove":{ "shape":"CreateVolumePermissionList", - "documentation":"

Removes the specified AWS account ID or group from the list.

" + "documentation":"

Removes the specified Amazon Web Services account ID or group from the list.

" } }, "documentation":"

Describes modifications to the list of create volume permissions for a volume.

" @@ -11422,11 +11671,11 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

  • io2: 100-64,000 IOPS

For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built on the Nitro System. Other instance families guarantee performance up to 32,000 IOPS.

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes.

" + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

  • io2: 100-64,000 IOPS

io1 and io2 volumes support up to 64,000 IOPS only on Instances built on the Nitro System. Other instance families support performance up to 32,000 IOPS.

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes.

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

" + "documentation":"

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

" }, "OutpostArn":{ "shape":"String", @@ -11461,6 +11710,11 @@ "Throughput":{ "shape":"Integer", "documentation":"

The throughput to provision for a volume, with a maximum of 1,000 MiB/s.

This parameter is valid only for gp3 volumes.

Valid Range: Minimum value of 125. Maximum value of 1000.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", + "idempotencyToken":true } } }, @@ -11647,7 +11901,7 @@ }, "PeerOwnerId":{ "shape":"String", - "documentation":"

The AWS account ID of the owner of the accepter VPC.

Default: Your AWS account ID

", + "documentation":"

The Amazon Web Services account ID of the owner of the accepter VPC.

Default: Your Amazon Web Services account ID

", "locationName":"peerOwnerId" }, "PeerVpcId":{ @@ -11951,6 +12205,13 @@ "DateTime":{"type":"timestamp"}, "DedicatedHostFlag":{"type":"boolean"}, "DedicatedHostId":{"type":"string"}, + "DedicatedHostIdList":{ + "type":"list", + "member":{ + "shape":"DedicatedHostId", + "locationName":"item" + } + }, "DefaultNetworkCardIndex":{"type":"integer"}, "DefaultRouteTableAssociationValue":{ "type":"string", @@ -12274,6 +12535,34 @@ } } }, + "DeleteInstanceEventWindowRequest":{ + "type":"structure", + "required":["InstanceEventWindowId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ForceDelete":{ + "shape":"Boolean", + "documentation":"

Specify true to force delete the event window. Use the force delete parameter if the event window is currently associated with targets.

" + }, + "InstanceEventWindowId":{ + "shape":"InstanceEventWindowId", + "documentation":"

The ID of the event window.

" + } + } + }, + "DeleteInstanceEventWindowResult":{ + "type":"structure", + "members":{ + "InstanceEventWindowState":{ + "shape":"InstanceEventWindowStateChange", + "documentation":"

The state of the event window.

", + "locationName":"instanceEventWindowState" + } + } + }, "DeleteInternetGatewayRequest":{ "type":"structure", "required":["InternetGatewayId"], @@ -12847,6 +13136,30 @@ }, "documentation":"

Contains the parameters for DeleteSpotDatafeedSubscription.

" }, + "DeleteSubnetCidrReservationRequest":{ + "type":"structure", + "required":["SubnetCidrReservationId"], + "members":{ + "SubnetCidrReservationId":{ + "shape":"SubnetCidrReservationId", + "documentation":"

The ID of the subnet CIDR reservation.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DeleteSubnetCidrReservationResult":{ + "type":"structure", + "members":{ + "DeletedSubnetCidrReservation":{ + "shape":"SubnetCidrReservation", + "documentation":"

Information about the deleted subnet CIDR reservation.

", + "locationName":"deletedSubnetCidrReservation" + } + } + }, "DeleteSubnetRequest":{ "type":"structure", "required":["SubnetId"], @@ -12878,7 +13191,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to delete. Specify a tag key and an optional tag value to delete specific tags. If you specify a tag key without a tag value, we delete any tag with this key regardless of its value. If you specify a tag key with an empty string as the tag value, we delete the tag only if its value is an empty string.

If you omit this parameter, we delete all user-defined tags for the specified resources. We do not delete AWS-generated tags (tags that have the aws: prefix).

", + "documentation":"

The tags to delete. Specify a tag key and an optional tag value to delete specific tags. If you specify a tag key without a tag value, we delete any tag with this key regardless of its value. If you specify a tag key with an empty string as the tag value, we delete the tag only if its value is an empty string.

If you omit this parameter, we delete all user-defined tags for the specified resources. We do not delete Amazon Web Services-generated tags (tags that have the aws: prefix).

", "locationName":"tag" } } @@ -13603,7 +13916,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • allocation-id - [EC2-VPC] The allocation ID for the address.

  • association-id - [EC2-VPC] The association ID for the address.

  • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

  • instance-id - The ID of the instance the address is associated with, if any.

  • network-border-group - A unique set of Availability Zones, Local Zones, or Wavelength Zones from where Amazon Web Services advertises IP addresses.

  • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

  • network-interface-owner-id - The account ID of the owner.

  • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

  • public-ip - The Elastic IP address, or the carrier IP address.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • allocation-id - [EC2-VPC] The allocation ID for the address.

  • association-id - [EC2-VPC] The association ID for the address.

  • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

  • instance-id - The ID of the instance the address is associated with, if any.

  • network-border-group - A unique set of Availability Zones, Local Zones, or Wavelength Zones from where Amazon Web Services advertises IP addresses.

  • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

  • network-interface-owner-id - The Amazon Web Services account ID of the owner.

  • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

  • public-ip - The Elastic IP address, or the carrier IP address.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", "locationName":"Filter" }, "PublicIps":{ @@ -13787,7 +14100,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • instance-type - The type of instance for which the Capacity Reservation reserves capacity.

  • owner-id - The ID of the account that owns the Capacity Reservation.

  • availability-zone-id - The Availability Zone ID of the Capacity Reservation.

  • instance-platform - The type of operating system for which the Capacity Reservation reserves capacity.

  • availability-zone - The Availability Zone ID of the Capacity Reservation.

  • tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

    • default - The Capacity Reservation is created on hardware that is shared with other accounts.

    • dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single account.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost on which the Capacity Reservation was created.

  • state - The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:

    • active- The Capacity Reservation is active and the capacity is available for your use.

    • expired - The Capacity Reservation expired automatically at the date and time specified in your request. The reserved capacity is no longer available for your use.

    • cancelled - The Capacity Reservation was cancelled. The reserved capacity is no longer available for your use.

    • pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.

    • failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, capacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.

  • start-date - The date and time at which the Capacity Reservation was started.

  • end-date - The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. The Capacity Reservation's state changes to expired when it reaches its end date and time.

  • end-date-type - Indicates the way in which the Capacity Reservation ends. A Capacity Reservation can have one of the following end types:

    • unlimited - The Capacity Reservation remains active until you explicitly cancel it.

    • limited - The Capacity Reservation expires automatically at a specified date and time.

  • instance-match-criteria - Indicates the type of instance launches that the Capacity Reservation accepts. The options include:

    • open - The Capacity Reservation accepts all instances that have matching attributes (instance type, platform, and Availability Zone). Instances that have matching attributes launch into the Capacity Reservation automatically without specifying any additional parameters.

    • targeted - The Capacity Reservation only accepts instances that have matching attributes (instance type, platform, and Availability Zone), and explicitly target the Capacity Reservation. This ensures that only permitted instances can use the reserved capacity.

", + "documentation":"

One or more filters.

  • instance-type - The type of instance for which the Capacity Reservation reserves capacity.

  • owner-id - The ID of the Amazon Web Services account that owns the Capacity Reservation.

  • availability-zone-id - The Availability Zone ID of the Capacity Reservation.

  • instance-platform - The type of operating system for which the Capacity Reservation reserves capacity.

  • availability-zone - The Availability Zone in which the Capacity Reservation was created.

  • tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

    • default - The Capacity Reservation is created on hardware that is shared with other Amazon Web Services accounts.

    • dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single Amazon Web Services account.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost on which the Capacity Reservation was created.

  • state - The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:

    • active - The Capacity Reservation is active and the capacity is available for your use.

    • expired - The Capacity Reservation expired automatically at the date and time specified in your request. The reserved capacity is no longer available for your use.

    • cancelled - The Capacity Reservation was cancelled. The reserved capacity is no longer available for your use.

    • pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.

    • failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, capacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.

  • start-date - The date and time at which the Capacity Reservation was started.

  • end-date - The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. The Capacity Reservation's state changes to expired when it reaches its end date and time.

  • end-date-type - Indicates the way in which the Capacity Reservation ends. A Capacity Reservation can have one of the following end types:

    • unlimited - The Capacity Reservation remains active until you explicitly cancel it.

    • limited - The Capacity Reservation expires automatically at a specified date and time.

  • instance-match-criteria - Indicates the type of instance launches that the Capacity Reservation accepts. The options include:

    • open - The Capacity Reservation accepts all instances that have matching attributes (instance type, platform, and Availability Zone). Instances that have matching attributes launch into the Capacity Reservation automatically without specifying any additional parameters.

    • targeted - The Capacity Reservation only accepts instances that have matching attributes (instance type, platform, and Availability Zone), and explicitly target the Capacity Reservation. This ensures that only permitted instances can use the reserved capacity.

", "locationName":"Filter" }, "DryRun":{ @@ -13821,7 +14134,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • carrier-gateway-id - The ID of the carrier gateway.

  • state - The state of the carrier gateway (pending | failed | available | deleting | deleted).

  • owner-id - The AWS account ID of the owner of the carrier gateway.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC associated with the carrier gateway.

", + "documentation":"

One or more filters.

  • carrier-gateway-id - The ID of the carrier gateway.

  • state - The state of the carrier gateway (pending | failed | available | deleting | deleted).

  • owner-id - The Amazon Web Services account ID of the owner of the carrier gateway.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC associated with the carrier gateway.

", "locationName":"Filter" }, "MaxResults":{ @@ -14263,7 +14576,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • dhcp-options-id - The ID of a DHCP options set.

  • key - The key for one of the options (for example, domain-name).

  • value - The value for one of the options.

  • owner-id - The ID of the AWS account that owns the DHCP options set.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "documentation":"

One or more filters.

  • dhcp-options-id - The ID of a DHCP options set.

  • key - The key for one of the options (for example, domain-name).

  • value - The value for one of the options.

  • owner-id - The ID of the Amazon Web Services account that owns the DHCP options set.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", "locationName":"Filter" }, "DryRun":{ @@ -14492,12 +14805,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", + "documentation":"

The ID of the Amazon Web Services account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", + "documentation":"

The Amazon Web Services owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -14545,7 +14858,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters. The possible values are:

  • availability-zone: The Availability Zone of the snapshot.

  • owner-id: The ID of the AWS account that enabled fast snapshot restore on the snapshot.

  • snapshot-id: The ID of the snapshot.

  • state: The state of fast snapshot restores for the snapshot (enabling | optimizing | enabled | disabling | disabled).

", + "documentation":"

The filters. The possible values are:

  • availability-zone: The Availability Zone of the snapshot.

  • owner-id: The ID of the Amazon Web Services account that enabled fast snapshot restore on the snapshot.

  • snapshot-id: The ID of the snapshot.

  • state: The state of fast snapshot restores for the snapshot (enabling | optimizing | enabled | disabling | disabled).

", "locationName":"Filter" }, "MaxResults":{ @@ -15376,6 +15689,49 @@ } } }, + "DescribeInstanceEventWindowsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "InstanceEventWindowIds":{ + "shape":"InstanceEventWindowIdSet", + "documentation":"

The IDs of the event windows.

", + "locationName":"InstanceEventWindowId" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters.

  • dedicated-host-id - The event windows associated with the specified Dedicated Host ID.

  • event-window-name - The event windows associated with the specified names.

  • instance-id - The event windows associated with the specified instance ID.

  • instance-tag - The event windows associated with the specified tag and value.

  • instance-tag-key - The event windows associated with the specified tag key, regardless of the value.

  • instance-tag-value - The event windows associated with the specified tag value, regardless of the key.

  • tag:<key> - The key/value combination of a tag assigned to the event window. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value CMX, specify tag:Owner for the filter name and CMX for the filter value.

  • tag-key - The key of a tag assigned to the event window. Use this filter to find all event windows that have a tag with a specific key, regardless of the tag value.

  • tag-value - The value of a tag assigned to the event window. Use this filter to find all event windows that have a tag with a specific value, regardless of the tag key.

", + "locationName":"Filter" + }, + "MaxResults":{ + "shape":"ResultRange", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 20 and 500. You cannot specify this parameter and the event window IDs parameter in the same call.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to request the next page of results.

" + } + }, + "documentation":"Describe instance event windows by InstanceEventWindow." + }, + "DescribeInstanceEventWindowsResult":{ + "type":"structure", + "members":{ + "InstanceEventWindows":{ + "shape":"InstanceEventWindowSet", + "documentation":"

Information about the event windows.

", + "locationName":"instanceEventWindowSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, "DescribeInstanceStatusRequest":{ "type":"structure", "members":{ @@ -15512,7 +15868,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

  • architecture - The instance architecture (i386 | x86_64 | arm64).

  • availability-zone - The Availability Zone of the instance.

  • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

  • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

  • block-device-mapping.volume-id - The volume ID of the EBS volume.

  • client-token - The idempotency token you provided when you launched the instance.

  • dns-name - The public DNS name of the instance.

  • group-id - The ID of the security group for the instance. EC2-Classic only.

  • group-name - The name of the security group for the instance. EC2-Classic only.

  • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

  • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

  • hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

  • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

  • image-id - The ID of the image used to launch the instance.

  • instance-id - The ID of the instance.

  • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

  • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

  • instance-type - The type of instance (for example, t2.micro).

  • instance.group-id - The ID of the security group for the instance.

  • instance.group-name - The name of the security group for the instance.

  • ip-address - The public IPv4 address of the instance.

  • kernel-id - The kernel ID.

  • key-name - The name of the key pair used when the instance was launched.

  • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

  • launch-time - The time when the instance was launched.

  • metadata-options.http-tokens - The metadata request authorization state (optional | required)

  • metadata-options.http-put-response-hop-limit - The http metadata request put response hop limit (integer, possible values 1 to 64)

  • metadata-options.http-endpoint - Enable or disable metadata access on http endpoint (enabled | disabled)

  • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

  • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

  • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

  • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

  • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • network-interface.attachment.attachment-id - The ID of the interface attachment.

  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • network-interface.attachment.device-index - The device index to which the network interface is attached.

  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

  • network-interface.availability-zone - The Availability Zone for the network interface.

  • network-interface.description - The description of the network interface.

  • network-interface.group-id - The ID of a security group associated with the network interface.

  • network-interface.group-name - The name of a security group associated with the network interface.

  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.mac-address - The MAC address of the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.owner-id - The ID of the owner of the network interface.

  • network-interface.private-dns-name - The private DNS name of the network interface.

  • network-interface.requester-id - The requester ID for the network interface.

  • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.

  • network-interface.status - The status of the network interface (available) | in-use).

  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • network-interface.subnet-id - The ID of the subnet for the network interface.

  • network-interface.vpc-id - The ID of the VPC for the network interface.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

  • owner-id - The account ID of the instance owner.

  • placement-group-name - The name of the placement group for the instance.

  • placement-partition-number - The partition in which the instance is located.

  • platform - The platform. To list only Windows instances, use windows.

  • private-dns-name - The private IPv4 DNS name of the instance.

  • private-ip-address - The private IPv4 address of the instance.

  • product-code - The product code associated with the AMI used to launch the instance.

  • product-code.type - The type of product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, Management Console, Auto Scaling, and so on).

  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

  • spot-instance-request-id - The ID of the Spot Instance request.

  • state-reason-code - The reason code for the state change.

  • state-reason-message - A message that describes the state change.

  • subnet-id - The ID of the subnet for the instance.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • tenancy - The tenancy of an instance (dedicated | default | host).

  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

  • vpc-id - The ID of the VPC that the instance is running in.

", + "documentation":"

The filters.

  • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

  • architecture - The instance architecture (i386 | x86_64 | arm64).

  • availability-zone - The Availability Zone of the instance.

  • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

  • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

  • block-device-mapping.volume-id - The volume ID of the EBS volume.

  • client-token - The idempotency token you provided when you launched the instance.

  • dns-name - The public DNS name of the instance.

  • group-id - The ID of the security group for the instance. EC2-Classic only.

  • group-name - The name of the security group for the instance. EC2-Classic only.

  • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

  • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

  • hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

  • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

  • image-id - The ID of the image used to launch the instance.

  • instance-id - The ID of the instance.

  • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

  • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

  • instance-type - The type of instance (for example, t2.micro).

  • instance.group-id - The ID of the security group for the instance.

  • instance.group-name - The name of the security group for the instance.

  • ip-address - The public IPv4 address of the instance.

  • kernel-id - The kernel ID.

  • key-name - The name of the key pair used when the instance was launched.

  • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

  • launch-time - The time when the instance was launched.

  • metadata-options.http-tokens - The metadata request authorization state (optional | required)

  • metadata-options.http-put-response-hop-limit - The http metadata request put response hop limit (integer, possible values 1 to 64)

  • metadata-options.http-endpoint - Enable or disable metadata access on http endpoint (enabled | disabled)

  • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

  • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

  • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

  • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

  • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • network-interface.attachment.attachment-id - The ID of the interface attachment.

  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • network-interface.attachment.device-index - The device index to which the network interface is attached.

  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

  • network-interface.availability-zone - The Availability Zone for the network interface.

  • network-interface.description - The description of the network interface.

  • network-interface.group-id - The ID of a security group associated with the network interface.

  • network-interface.group-name - The name of a security group associated with the network interface.

  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.mac-address - The MAC address of the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.owner-id - The ID of the owner of the network interface.

  • network-interface.private-dns-name - The private DNS name of the network interface.

  • network-interface.requester-id - The requester ID for the network interface.

  • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.

  • network-interface.status - The status of the network interface (available | in-use).

  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • network-interface.subnet-id - The ID of the subnet for the network interface.

  • network-interface.vpc-id - The ID of the VPC for the network interface.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

  • owner-id - The Amazon Web Services account ID of the instance owner.

  • placement-group-name - The name of the placement group for the instance.

  • placement-partition-number - The partition in which the instance is located.

  • platform - The platform. To list only Windows instances, use windows.

  • private-dns-name - The private IPv4 DNS name of the instance.

  • private-ip-address - The private IPv4 address of the instance.

  • product-code - The product code associated with the AMI used to launch the instance.

  • product-code.type - The type of product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

  • spot-instance-request-id - The ID of the Spot Instance request.

  • state-reason-code - The reason code for the state change.

  • state-reason-message - A message that describes the state change.

  • subnet-id - The ID of the subnet for the instance.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • tenancy - The tenancy of an instance (dedicated | default | host).

  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

  • vpc-id - The ID of the VPC that the instance is running in.

", "locationName":"Filter" }, "InstanceIds":{ @@ -15562,7 +15918,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.

  • attachment.vpc-id - The ID of an attached VPC.

  • internet-gateway-id - The ID of the Internet gateway.

  • owner-id - The ID of the AWS account that owns the internet gateway.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "documentation":"

One or more filters.

  • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.

  • attachment.vpc-id - The ID of an attached VPC.

  • internet-gateway-id - The ID of the Internet gateway.

  • owner-id - The ID of the Amazon Web Services account that owns the internet gateway.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", "locationName":"Filter" }, "DryRun":{ @@ -16187,7 +16543,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • association.association-id - The ID of an association ID for the ACL.

  • association.network-acl-id - The ID of the network ACL involved in the association.

  • association.subnet-id - The ID of the subnet involved in the association.

  • default - Indicates whether the ACL is the default network ACL for the VPC.

  • entry.cidr - The IPv4 CIDR range specified in the entry.

  • entry.icmp.code - The ICMP code specified in the entry, if any.

  • entry.icmp.type - The ICMP type specified in the entry, if any.

  • entry.ipv6-cidr - The IPv6 CIDR range specified in the entry.

  • entry.port-range.from - The start of the port range specified in the entry.

  • entry.port-range.to - The end of the port range specified in the entry.

  • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

  • entry.rule-action - Allows or denies the matching traffic (allow | deny).

  • entry.rule-number - The number of an entry (in other words, rule) in the set of ACL entries.

  • network-acl-id - The ID of the network ACL.

  • owner-id - The ID of the AWS account that owns the network ACL.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network ACL.

", + "documentation":"

One or more filters.

  • association.association-id - The ID of an association for the ACL.

  • association.network-acl-id - The ID of the network ACL involved in the association.

  • association.subnet-id - The ID of the subnet involved in the association.

  • default - Indicates whether the ACL is the default network ACL for the VPC.

  • entry.cidr - The IPv4 CIDR range specified in the entry.

  • entry.icmp.code - The ICMP code specified in the entry, if any.

  • entry.icmp.type - The ICMP type specified in the entry, if any.

  • entry.ipv6-cidr - The IPv6 CIDR range specified in the entry.

  • entry.port-range.from - The start of the port range specified in the entry.

  • entry.port-range.to - The end of the port range specified in the entry.

  • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

  • entry.rule-action - Allows or denies the matching traffic (allow | deny).

  • entry.rule-number - The number of an entry (in other words, rule) in the set of ACL entries.

  • network-acl-id - The ID of the network ACL.

  • owner-id - The ID of the Amazon Web Services account that owns the network ACL.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network ACL.

", "locationName":"Filter" }, "DryRun":{ @@ -16389,7 +16745,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • network-interface-permission.network-interface-permission-id - The ID of the permission.

  • network-interface-permission.network-interface-id - The ID of the network interface.

  • network-interface-permission.aws-account-id - The account ID.

  • network-interface-permission.aws-service - The Amazon Web Service.

  • network-interface-permission.permission - The type of permission (INSTANCE-ATTACH | EIP-ASSOCIATE).

", + "documentation":"

One or more filters.

  • network-interface-permission.network-interface-permission-id - The ID of the permission.

  • network-interface-permission.network-interface-id - The ID of the network interface.

  • network-interface-permission.aws-account-id - The Amazon Web Services account ID.

  • network-interface-permission.aws-service - The Amazon Web Service.

  • network-interface-permission.permission - The type of permission (INSTANCE-ATTACH | EIP-ASSOCIATE).

", "locationName":"Filter" }, "NextToken":{ @@ -16429,7 +16785,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • association.public-dns-name - The public DNS name for the network interface (IPv4).

  • attachment.attachment-id - The ID of the interface attachment.

  • attachment.attach-time - The time that the network interface was attached to an instance.

  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

  • attachment.device-index - The device index to which the network interface is attached.

  • attachment.instance-id - The ID of the instance to which the network interface is attached.

  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • availability-zone - The Availability Zone of the network interface.

  • description - The description of the network interface.

  • group-id - The ID of a security group associated with the network interface.

  • group-name - The name of a security group associated with the network interface.

  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

  • mac-address - The MAC address of the network interface.

  • network-interface-id - The ID of the network interface.

  • owner-id - The account ID of the network interface owner.

  • private-ip-address - The private IPv4 address or addresses of the network interface.

  • private-dns-name - The private DNS name of the network interface (IPv4).

  • requester-id - The alias or account ID of the principal or service that created the network interface.

  • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service (for example, Management Console, Auto Scaling, and so on).

  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

  • subnet-id - The ID of the subnet for the network interface.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network interface.

", + "documentation":"

One or more filters.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • association.public-dns-name - The public DNS name for the network interface (IPv4).

  • attachment.attachment-id - The ID of the interface attachment.

  • attachment.attach-time - The time that the network interface was attached to an instance.

  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

  • attachment.device-index - The device index to which the network interface is attached.

  • attachment.instance-id - The ID of the instance to which the network interface is attached.

  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • availability-zone - The Availability Zone of the network interface.

  • description - The description of the network interface.

  • group-id - The ID of a security group associated with the network interface.

  • group-name - The name of a security group associated with the network interface.

  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

  • mac-address - The MAC address of the network interface.

  • network-interface-id - The ID of the network interface.

  • owner-id - The Amazon Web Services account ID of the network interface owner.

  • private-ip-address - The private IPv4 address or addresses of the network interface.

  • private-dns-name - The private DNS name of the network interface (IPv4).

  • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

  • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

  • subnet-id - The ID of the subnet for the network interface.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network interface.

", "locationName":"filter" }, "DryRun":{ @@ -16916,7 +17272,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • association.route-table-association-id - The ID of an association ID for the route table.

  • association.route-table-id - The ID of the route table involved in the association.

  • association.subnet-id - The ID of the subnet involved in the association.

  • association.main - Indicates whether the route table is the main route table for the VPC (true | false). Route tables that do not have an association ID are not returned in the response.

  • owner-id - The ID of the AWS account that owns the route table.

  • route-table-id - The ID of the route table.

  • route.destination-cidr-block - The IPv4 CIDR range specified in a route in the table.

  • route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table.

  • route.destination-prefix-list-id - The ID (prefix) of the AWS service specified in a route in the table.

  • route.egress-only-internet-gateway-id - The ID of an egress-only Internet gateway specified in a route in the route table.

  • route.gateway-id - The ID of a gateway specified in a route in the table.

  • route.instance-id - The ID of an instance specified in a route in the table.

  • route.nat-gateway-id - The ID of a NAT gateway.

  • route.transit-gateway-id - The ID of a transit gateway.

  • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

  • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

  • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the route table.

", + "documentation":"

One or more filters.

  • association.route-table-association-id - The ID of an association for the route table.

  • association.route-table-id - The ID of the route table involved in the association.

  • association.subnet-id - The ID of the subnet involved in the association.

  • association.main - Indicates whether the route table is the main route table for the VPC (true | false). Route tables that do not have an association ID are not returned in the response.

  • owner-id - The ID of the Amazon Web Services account that owns the route table.

  • route-table-id - The ID of the route table.

  • route.destination-cidr-block - The IPv4 CIDR range specified in a route in the table.

  • route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table.

  • route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Service specified in a route in the table.

  • route.egress-only-internet-gateway-id - The ID of an egress-only Internet gateway specified in a route in the route table.

  • route.gateway-id - The ID of a gateway specified in a route in the table.

  • route.instance-id - The ID of an instance specified in a route in the table.

  • route.nat-gateway-id - The ID of a NAT gateway.

  • route.transit-gateway-id - The ID of a transit gateway.

  • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

  • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

  • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the route table.

", "locationName":"Filter" }, "DryRun":{ @@ -17238,7 +17594,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • description - A description of the snapshot.

  • encrypted - Indicates whether the snapshot is encrypted (true | false)

  • owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is not the user-configured AWS account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.

  • owner-id - The AWS account ID of the owner. We recommend that you use the related parameter instead of this filter.

  • progress - The progress of the snapshot, as a percentage (for example, 80%).

  • snapshot-id - The snapshot ID.

  • start-time - The time stamp when the snapshot was initiated.

  • status - The status of the snapshot (pending | completed | error).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • volume-id - The ID of the volume the snapshot is for.

  • volume-size - The size of the volume, in GiB.

", + "documentation":"

The filters.

  • description - A description of the snapshot.

  • encrypted - Indicates whether the snapshot is encrypted (true | false).

  • owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is not the user-configured Amazon Web Services account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.

  • owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the related parameter instead of this filter.

  • progress - The progress of the snapshot, as a percentage (for example, 80%).

  • snapshot-id - The snapshot ID.

  • start-time - The time stamp when the snapshot was initiated.

  • status - The status of the snapshot (pending | completed | error).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • volume-id - The ID of the volume the snapshot is for.

  • volume-size - The size of the volume, in GiB.

", "locationName":"Filter" }, "MaxResults":{ @@ -17251,12 +17607,12 @@ }, "OwnerIds":{ "shape":"OwnerStringList", - "documentation":"

Scopes the results to snapshots with the specified owners. You can specify a combination of AWS account IDs, self, and amazon.

", + "documentation":"

Scopes the results to snapshots with the specified owners. You can specify a combination of Amazon Web Services account IDs, self, and amazon.

", "locationName":"Owner" }, "RestorableByUserIds":{ "shape":"RestorableByStringList", - "documentation":"

The IDs of the AWS accounts that can create volumes from the snapshot.

", + "documentation":"

The IDs of the Amazon Web Services accounts that can create volumes from the snapshot.

", "locationName":"RestorableBy" }, "SnapshotIds":{ @@ -17695,7 +18051,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • availability-zone - The Availability Zone for the subnet. You can also use availabilityZone as the filter name.

  • availability-zone-id - The ID of the Availability Zone for the subnet. You can also use availabilityZoneId as the filter name.

  • available-ip-address-count - The number of IPv4 addresses in the subnet that are available.

  • cidr-block - The IPv4 CIDR block of the subnet. The CIDR block you specify must exactly match the subnet's CIDR block for information to be returned for the subnet. You can also use cidr or cidrBlock as the filter names.

  • default-for-az - Indicates whether this is the default subnet for the Availability Zone. You can also use defaultForAz as the filter name.

  • ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with the subnet.

  • ipv6-cidr-block-association.association-id - An association ID for an IPv6 CIDR block associated with the subnet.

  • ipv6-cidr-block-association.state - The state of an IPv6 CIDR block associated with the subnet.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

  • owner-id - The ID of the AWS account that owns the subnet.

  • state - The state of the subnet (pending | available).

  • subnet-arn - The Amazon Resource Name (ARN) of the subnet.

  • subnet-id - The ID of the subnet.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the subnet.

", + "documentation":"

One or more filters.

  • availability-zone - The Availability Zone for the subnet. You can also use availabilityZone as the filter name.

  • availability-zone-id - The ID of the Availability Zone for the subnet. You can also use availabilityZoneId as the filter name.

  • available-ip-address-count - The number of IPv4 addresses in the subnet that are available.

  • cidr-block - The IPv4 CIDR block of the subnet. The CIDR block you specify must exactly match the subnet's CIDR block for information to be returned for the subnet. You can also use cidr or cidrBlock as the filter names.

  • default-for-az - Indicates whether this is the default subnet for the Availability Zone. You can also use defaultForAz as the filter name.

  • ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with the subnet.

  • ipv6-cidr-block-association.association-id - An association ID for an IPv6 CIDR block associated with the subnet.

  • ipv6-cidr-block-association.state - The state of an IPv6 CIDR block associated with the subnet.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

  • owner-id - The ID of the Amazon Web Services account that owns the subnet.

  • state - The state of the subnet (pending | available).

  • subnet-arn - The Amazon Resource Name (ARN) of the subnet.

  • subnet-id - The ID of the subnet.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the subnet.

", "locationName":"Filter" }, "SubnetIds":{ @@ -18827,7 +19183,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • accepter-vpc-info.cidr-block - The IPv4 CIDR block of the accepter VPC.

  • accepter-vpc-info.owner-id - The AWS account ID of the owner of the accepter VPC.

  • accepter-vpc-info.vpc-id - The ID of the accepter VPC.

  • expiration-time - The expiration date and time for the VPC peering connection.

  • requester-vpc-info.cidr-block - The IPv4 CIDR block of the requester's VPC.

  • requester-vpc-info.owner-id - The AWS account ID of the owner of the requester VPC.

  • requester-vpc-info.vpc-id - The ID of the requester VPC.

  • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleting | deleted | rejected).

  • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-peering-connection-id - The ID of the VPC peering connection.

", + "documentation":"

One or more filters.

  • accepter-vpc-info.cidr-block - The IPv4 CIDR block of the accepter VPC.

  • accepter-vpc-info.owner-id - The ID of the Amazon Web Services account that owns the accepter VPC.

  • accepter-vpc-info.vpc-id - The ID of the accepter VPC.

  • expiration-time - The expiration date and time for the VPC peering connection.

  • requester-vpc-info.cidr-block - The IPv4 CIDR block of the requester's VPC.

  • requester-vpc-info.owner-id - The ID of the Amazon Web Services account that owns the requester VPC.

  • requester-vpc-info.vpc-id - The ID of the requester VPC.

  • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleting | deleted | rejected).

  • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-peering-connection-id - The ID of the VPC peering connection.

", "locationName":"Filter" }, "DryRun":{ @@ -18875,7 +19231,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • cidr - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

  • cidr-block-association.cidr-block - An IPv4 CIDR block associated with the VPC.

  • cidr-block-association.association-id - The association ID for an IPv4 CIDR block associated with the VPC.

  • cidr-block-association.state - The state of an IPv4 CIDR block associated with the VPC.

  • dhcp-options-id - The ID of a set of DHCP options.

  • ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with the VPC.

  • ipv6-cidr-block-association.ipv6-pool - The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated.

  • ipv6-cidr-block-association.association-id - The association ID for an IPv6 CIDR block associated with the VPC.

  • ipv6-cidr-block-association.state - The state of an IPv6 CIDR block associated with the VPC.

  • isDefault - Indicates whether the VPC is the default VPC.

  • owner-id - The ID of the AWS account that owns the VPC.

  • state - The state of the VPC (pending | available).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC.

", + "documentation":"

One or more filters.

  • cidr - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

  • cidr-block-association.cidr-block - An IPv4 CIDR block associated with the VPC.

  • cidr-block-association.association-id - The association ID for an IPv4 CIDR block associated with the VPC.

  • cidr-block-association.state - The state of an IPv4 CIDR block associated with the VPC.

  • dhcp-options-id - The ID of a set of DHCP options.

  • ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with the VPC.

  • ipv6-cidr-block-association.ipv6-pool - The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated.

  • ipv6-cidr-block-association.association-id - The association ID for an IPv6 CIDR block associated with the VPC.

  • ipv6-cidr-block-association.state - The state of an IPv6 CIDR block associated with the VPC.

  • is-default - Indicates whether the VPC is the default VPC.

  • owner-id - The ID of the Amazon Web Services account that owns the VPC.

  • state - The state of the VPC (pending | available).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC.

", "locationName":"Filter" }, "VpcIds":{ @@ -19159,7 +19515,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the DHCP options set.

", + "documentation":"

The ID of the Amazon Web Services account that owns the DHCP options set.

", "locationName":"ownerId" }, "Tags":{ @@ -19312,12 +19668,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", + "documentation":"

The ID of the Amazon Web Services account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", + "documentation":"

The Amazon Web Services owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -19635,6 +19991,37 @@ } } }, + "DisassociateInstanceEventWindowRequest":{ + "type":"structure", + "required":[ + "InstanceEventWindowId", + "AssociationTarget" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "InstanceEventWindowId":{ + "shape":"InstanceEventWindowId", + "documentation":"

The ID of the event window.

" + }, + "AssociationTarget":{ + "shape":"InstanceEventWindowDisassociationRequest", + "documentation":"

One or more targets to disassociate from the specified event window.

" + } + } + }, + "DisassociateInstanceEventWindowResult":{ + "type":"structure", + "members":{ + "InstanceEventWindow":{ + "shape":"InstanceEventWindow", + "documentation":"

Information about the event window.

", + "locationName":"instanceEventWindow" + } + } + }, "DisassociateRouteTableRequest":{ "type":"structure", "required":["AssociationId"], @@ -20552,12 +20939,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", + "documentation":"

The ID of the Amazon Web Services account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", + "documentation":"

The Amazon Web Services owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -20609,7 +20996,7 @@ }, "SourceSnapshotIds":{ "shape":"SnapshotIdStringList", - "documentation":"

The IDs of one or more snapshots. For example, snap-1234567890abcdef0. You can specify a snapshot that was shared with you from another AWS account.

", + "documentation":"

The IDs of one or more snapshots. For example, snap-1234567890abcdef0. You can specify a snapshot that was shared with you from another Amazon Web Services account.

", "locationName":"SourceSnapshotId" }, "DryRun":{ @@ -22649,7 +23036,7 @@ "members":{ "KmsKeyId":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the default CMK for encryption by default.

", + "documentation":"

The Amazon Resource Name (ARN) of the default KMS key for encryption by default.

", "locationName":"kmsKeyId" } } @@ -23027,6 +23414,58 @@ } } }, + "GetSubnetCidrReservationsMaxResults":{ + "type":"integer", + "max":1000, + "min":5 + }, + "GetSubnetCidrReservationsRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters.

  • reservationType - The type of reservation (prefix | explicit).

  • subnet-id - The ID of the subnet.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "locationName":"Filter" + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

The ID of the subnet.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next page of results.

" + }, + "MaxResults":{ + "shape":"GetSubnetCidrReservationsMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + } + } + }, + "GetSubnetCidrReservationsResult":{ + "type":"structure", + "members":{ + "SubnetIpv4CidrReservations":{ + "shape":"SubnetCidrReservationList", + "documentation":"

Information about the IPv4 subnet CIDR reservations.

", + "locationName":"subnetIpv4CidrReservationSet" + }, + "SubnetIpv6CidrReservations":{ + "shape":"SubnetCidrReservationList", + "documentation":"

Information about the IPv6 subnet CIDR reservations.

", + "locationName":"subnetIpv6CidrReservationSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, "GetTransitGatewayAttachmentPropagationsRequest":{ "type":"structure", "required":["TransitGatewayAttachmentId"], @@ -23504,7 +23943,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the account that owns the Dedicated Host.

", + "documentation":"

The ID of the Amazon Web Services account that owns the Dedicated Host.

", "locationName":"ownerId" }, "AvailabilityZoneId":{ @@ -23535,7 +23974,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the account that owns the instance.

", + "documentation":"

The ID of the Amazon Web Services account that owns the instance.

", "locationName":"ownerId" } }, @@ -23739,6 +24178,11 @@ "host" ] }, + "Hour":{ + "type":"integer", + "max":23, + "min":0 + }, "HttpTokensState":{ "type":"string", "enum":[ @@ -25476,6 +25920,210 @@ "documentation":"

Describes the credit option for CPU usage of a burstable performance instance.

" }, "InstanceEventId":{"type":"string"}, + "InstanceEventWindow":{ + "type":"structure", + "members":{ + "InstanceEventWindowId":{ + "shape":"InstanceEventWindowId", + "documentation":"

The ID of the event window.

", + "locationName":"instanceEventWindowId" + }, + "TimeRanges":{ + "shape":"InstanceEventWindowTimeRangeList", + "documentation":"

One or more time ranges defined for the event window.

", + "locationName":"timeRangeSet" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the event window.

", + "locationName":"name" + }, + "CronExpression":{ + "shape":"InstanceEventWindowCronExpression", + "documentation":"

The cron expression defined for the event window.

", + "locationName":"cronExpression" + }, + "AssociationTarget":{ + "shape":"InstanceEventWindowAssociationTarget", + "documentation":"

One or more targets associated with the event window.

", + "locationName":"associationTarget" + }, + "State":{ + "shape":"InstanceEventWindowState", + "documentation":"

The current state of the event window.

", + "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The instance tags associated with the event window.

", + "locationName":"tagSet" + } + }, + "documentation":"

The event window.

" + }, + "InstanceEventWindowAssociationRequest":{ + "type":"structure", + "members":{ + "InstanceIds":{ + "shape":"InstanceIdList", + "documentation":"

The IDs of the instances to associate with the event window. If the instance is on a Dedicated Host, you can't specify the Instance ID parameter; you must use the Dedicated Host ID parameter.

", + "locationName":"InstanceId" + }, + "InstanceTags":{ + "shape":"TagList", + "documentation":"

The instance tags to associate with the event window. Any instances associated with the tags will be associated with the event window.

", + "locationName":"InstanceTag" + }, + "DedicatedHostIds":{ + "shape":"DedicatedHostIdList", + "documentation":"

The IDs of the Dedicated Hosts to associate with the event window.

", + "locationName":"DedicatedHostId" + } + }, + "documentation":"

One or more targets associated with the specified event window. Only one type of target (instance ID, instance tag, or Dedicated Host ID) can be associated with an event window.

" + }, + "InstanceEventWindowAssociationTarget":{ + "type":"structure", + "members":{ + "InstanceIds":{ + "shape":"InstanceIdList", + "documentation":"

The IDs of the instances associated with the event window.

", + "locationName":"instanceIdSet" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The instance tags associated with the event window. Any instances associated with the tags will be associated with the event window.

", + "locationName":"tagSet" + }, + "DedicatedHostIds":{ + "shape":"DedicatedHostIdList", + "documentation":"

The IDs of the Dedicated Hosts associated with the event window.

", + "locationName":"dedicatedHostIdSet" + } + }, + "documentation":"

One or more targets associated with the event window.

" + }, + "InstanceEventWindowCronExpression":{"type":"string"}, + "InstanceEventWindowDisassociationRequest":{ + "type":"structure", + "members":{ + "InstanceIds":{ + "shape":"InstanceIdList", + "documentation":"

The IDs of the instances to disassociate from the event window.

", + "locationName":"InstanceId" + }, + "InstanceTags":{ + "shape":"TagList", + "documentation":"

The instance tags to disassociate from the event window. Any instances associated with the tags will be disassociated from the event window.

", + "locationName":"InstanceTag" + }, + "DedicatedHostIds":{ + "shape":"DedicatedHostIdList", + "documentation":"

The IDs of the Dedicated Hosts to disassociate from the event window.

", + "locationName":"DedicatedHostId" + } + }, + "documentation":"

The targets to disassociate from the specified event window.

" + }, + "InstanceEventWindowId":{"type":"string"}, + "InstanceEventWindowIdSet":{ + "type":"list", + "member":{ + "shape":"InstanceEventWindowId", + "locationName":"InstanceEventWindowId" + } + }, + "InstanceEventWindowSet":{ + "type":"list", + "member":{ + "shape":"InstanceEventWindow", + "locationName":"item" + } + }, + "InstanceEventWindowState":{ + "type":"string", + "enum":[ + "creating", + "deleting", + "active", + "deleted" + ] + }, + "InstanceEventWindowStateChange":{ + "type":"structure", + "members":{ + "InstanceEventWindowId":{ + "shape":"InstanceEventWindowId", + "documentation":"

The ID of the event window.

", + "locationName":"instanceEventWindowId" + }, + "State":{ + "shape":"InstanceEventWindowState", + "documentation":"

The current state of the event window.

", + "locationName":"state" + } + }, + "documentation":"

The state of the event window.

" + }, + "InstanceEventWindowTimeRange":{ + "type":"structure", + "members":{ + "StartWeekDay":{ + "shape":"WeekDay", + "documentation":"

The day on which the time range begins.

", + "locationName":"startWeekDay" + }, + "StartHour":{ + "shape":"Hour", + "documentation":"

The hour when the time range begins.

", + "locationName":"startHour" + }, + "EndWeekDay":{ + "shape":"WeekDay", + "documentation":"

The day on which the time range ends.

", + "locationName":"endWeekDay" + }, + "EndHour":{ + "shape":"Hour", + "documentation":"

The hour when the time range ends.

", + "locationName":"endHour" + } + }, + "documentation":"

The start day and time and the end day and time of the time range, in UTC.

" + }, + "InstanceEventWindowTimeRangeList":{ + "type":"list", + "member":{ + "shape":"InstanceEventWindowTimeRange", + "locationName":"item" + } + }, + "InstanceEventWindowTimeRangeRequest":{ + "type":"structure", + "members":{ + "StartWeekDay":{ + "shape":"WeekDay", + "documentation":"

The day on which the time range begins.

" + }, + "StartHour":{ + "shape":"Hour", + "documentation":"

The hour when the time range begins.

" + }, + "EndWeekDay":{ + "shape":"WeekDay", + "documentation":"

The day on which the time range ends.

" + }, + "EndHour":{ + "shape":"Hour", + "documentation":"

The hour when the time range ends.

" + } + }, + "documentation":"

The start day and time and the end day and time of the time range, in UTC.

" + }, + "InstanceEventWindowTimeRangeRequestSet":{ + "type":"list", + "member":{"shape":"InstanceEventWindowTimeRangeRequest"} + }, "InstanceExportDetails":{ "type":"structure", "members":{ @@ -25516,6 +26164,13 @@ ] }, "InstanceId":{"type":"string"}, + "InstanceIdList":{ + "type":"list", + "member":{ + "shape":"InstanceId", + "locationName":"item" + } + }, "InstanceIdSet":{ "type":"list", "member":{ @@ -25545,6 +26200,24 @@ "terminate" ] }, + "InstanceIpv4Prefix":{ + "type":"structure", + "members":{ + "Ipv4Prefix":{ + "shape":"String", + "documentation":"

One or more IPv4 delegated prefixes assigned to the network interface.

", + "locationName":"ipv4Prefix" + } + }, + "documentation":"

Information about an IPv4 delegated prefix.

" + }, + "InstanceIpv4PrefixList":{ + "type":"list", + "member":{ + "shape":"InstanceIpv4Prefix", + "locationName":"item" + } + }, "InstanceIpv6Address":{ "type":"structure", "members":{ @@ -25580,6 +26253,24 @@ }, "documentation":"

Describes an IPv6 address.

" }, + "InstanceIpv6Prefix":{ + "type":"structure", + "members":{ + "Ipv6Prefix":{ + "shape":"String", + "documentation":"

One or more IPv6 delegated prefixes assigned to the network interface.

", + "locationName":"ipv6Prefix" + } + }, + "documentation":"

Information about an IPv6 delegated prefix.

" + }, + "InstanceIpv6PrefixList":{ + "type":"list", + "member":{ + "shape":"InstanceIpv6Prefix", + "locationName":"item" + } + }, "InstanceLifecycle":{ "type":"string", "enum":[ @@ -25743,7 +26434,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the account that created the network interface.

", + "documentation":"

The ID of the Amazon Web Services account that created the network interface.

", "locationName":"ownerId" }, "PrivateDnsName":{ @@ -25785,6 +26476,16 @@ "shape":"String", "documentation":"

Describes the type of network interface.

Valid values: interface | efa | trunk

", "locationName":"interfaceType" + }, + "Ipv4Prefixes":{ + "shape":"InstanceIpv4PrefixList", + "documentation":"

The IPv4 delegated prefixes that are assigned to the network interface.

", + "locationName":"ipv4PrefixSet" + }, + "Ipv6Prefixes":{ + "shape":"InstanceIpv6PrefixList", + "documentation":"

The IPv6 delegated prefixes that are assigned to the network interface.

", + "locationName":"ipv6PrefixSet" } }, "documentation":"

Describes a network interface.

" @@ -25934,6 +26635,24 @@ "NetworkCardIndex":{ "shape":"Integer", "documentation":"

The index of the network card. Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0.

" + }, + "Ipv4Prefixes":{ + "shape":"Ipv4PrefixList", + "documentation":"

One or more IPv4 delegated prefixes to be assigned to the network interface. You cannot use this option if you use the Ipv4PrefixCount option.

", + "locationName":"Ipv4Prefix" + }, + "Ipv4PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv4 delegated prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv4Prefix option.

" + }, + "Ipv6Prefixes":{ + "shape":"Ipv6PrefixList", + "documentation":"

One or more IPv6 delegated prefixes to be assigned to the network interface. You cannot use this option if you use the Ipv6PrefixCount option.

", + "locationName":"Ipv6Prefix" + }, + "Ipv6PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv6 delegated prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.

" } }, "documentation":"

Describes a network interface.

" @@ -26465,6 +27184,8 @@ "g3.8xlarge", "g3.16xlarge", "g3s.xlarge", + "g4ad.xlarge", + "g4ad.2xlarge", "g4ad.4xlarge", "g4ad.8xlarge", "g4ad.16xlarge", @@ -26815,12 +27536,12 @@ "members":{ "AccountId":{ "shape":"String", - "documentation":"

The ID of the account that is making use of the Capacity Reservation.

", + "documentation":"

The ID of the Amazon Web Services account that is making use of the Capacity Reservation.

", "locationName":"accountId" }, "UsedInstanceCount":{ "shape":"Integer", - "documentation":"

The number of instances the account currently has in the Capacity Reservation.

", + "documentation":"

The number of instances the Amazon Web Services account currently has in the Capacity Reservation.

", "locationName":"usedInstanceCount" } }, @@ -26874,7 +27595,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the internet gateway.

", + "documentation":"

The ID of the Amazon Web Services account that owns the internet gateway.

", "locationName":"ownerId" }, "Tags":{ @@ -26984,6 +27705,13 @@ "locationName":"item" } }, + "IpPrefixList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, "IpRange":{ "type":"structure", "members":{ @@ -27015,6 +27743,59 @@ } }, "Ipv4PoolEc2Id":{"type":"string"}, + "Ipv4PrefixList":{ + "type":"list", + "member":{ + "shape":"Ipv4PrefixSpecificationRequest", + "locationName":"item" + } + }, + "Ipv4PrefixListResponse":{ + "type":"list", + "member":{ + "shape":"Ipv4PrefixSpecificationResponse", + "locationName":"item" + } + }, + "Ipv4PrefixSpecification":{ + "type":"structure", + "members":{ + "Ipv4Prefix":{ + "shape":"String", + "documentation":"

The IPv4 Prefix Delegation prefix. For information, see Prefix Delegation in the Amazon Elastic Compute Cloud User Guide.

", + "locationName":"ipv4Prefix" + } + }, + "documentation":"

Describes an IPv4 Prefix Delegation.

" + }, + "Ipv4PrefixSpecificationRequest":{ + "type":"structure", + "members":{ + "Ipv4Prefix":{ + "shape":"String", + "documentation":"

The IPv4 Prefix Delegation prefix. For information, see Prefix Delegation in the Amazon Elastic Compute Cloud User Guide.

" + } + }, + "documentation":"

Describes the IPv4 Prefix Delegation option for a network interface.

" + }, + "Ipv4PrefixSpecificationResponse":{ + "type":"structure", + "members":{ + "Ipv4Prefix":{ + "shape":"String", + "documentation":"

One or more IPv4 delegated prefixes assigned to the network interface.

", + "locationName":"ipv4Prefix" + } + }, + "documentation":"

Information about the IPv4 delegated prefixes assigned to a network interface.

" + }, + "Ipv4PrefixesList":{ + "type":"list", + "member":{ + "shape":"Ipv4PrefixSpecification", + "locationName":"item" + } + }, "Ipv6Address":{"type":"string"}, "Ipv6AddressList":{ "type":"list", @@ -27111,6 +27892,59 @@ "locationName":"item" } }, + "Ipv6PrefixList":{ + "type":"list", + "member":{ + "shape":"Ipv6PrefixSpecificationRequest", + "locationName":"item" + } + }, + "Ipv6PrefixListResponse":{ + "type":"list", + "member":{ + "shape":"Ipv6PrefixSpecificationResponse", + "locationName":"item" + } + }, + "Ipv6PrefixSpecification":{ + "type":"structure", + "members":{ + "Ipv6Prefix":{ + "shape":"String", + "documentation":"

The IPv6 Prefix Delegation prefix.

", + "locationName":"ipv6Prefix" + } + }, + "documentation":"

Describes the IPv6 Prefix Delegation.

" + }, + "Ipv6PrefixSpecificationRequest":{ + "type":"structure", + "members":{ + "Ipv6Prefix":{ + "shape":"String", + "documentation":"

The IPv6 Prefix Delegation prefix.

" + } + }, + "documentation":"

Describes the IPv4 Prefix Delegation option for a network interface.

" + }, + "Ipv6PrefixSpecificationResponse":{ + "type":"structure", + "members":{ + "Ipv6Prefix":{ + "shape":"String", + "documentation":"

One or more IPv6 delegated prefixes assigned to the network interface.

", + "locationName":"ipv6Prefix" + } + }, + "documentation":"

Information about the IPv6 delegated prefixes assigned to a network interface.

" + }, + "Ipv6PrefixesList":{ + "type":"list", + "member":{ + "shape":"Ipv6PrefixSpecification", + "locationName":"item" + } + }, "Ipv6Range":{ "type":"structure", "members":{ @@ -27585,7 +28419,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption.

", + "documentation":"

The ARN of the Key Management Service (KMS) CMK used for encryption.

", "locationName":"kmsKeyId" }, "SnapshotId":{ @@ -27628,7 +28462,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ARN of the symmetric AWS Key Management Service (AWS KMS) CMK used for encryption.

" + "documentation":"

The ARN of the symmetric Key Management Service (KMS) CMK used for encryption.

" }, "SnapshotId":{ "shape":"SnapshotId", @@ -27703,21 +28537,21 @@ "members":{ "Enabled":{ "shape":"Boolean", - "documentation":"

If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; otherwise, it is not enabled for AWS Nitro Enclaves.

", + "documentation":"

If this parameter is set to true, the instance is enabled for Amazon Web Services Nitro Enclaves; otherwise, it is not enabled for Amazon Web Services Nitro Enclaves.

", "locationName":"enabled" } }, - "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves.

" + "documentation":"

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.

" }, "LaunchTemplateEnclaveOptionsRequest":{ "type":"structure", "members":{ "Enabled":{ "shape":"Boolean", - "documentation":"

To enable the instance for AWS Nitro Enclaves, set this parameter to true.

" + "documentation":"

To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter to true.

" } }, - "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see What is AWS Nitro Enclaves? in the AWS Nitro Enclaves User Guide.

" + "documentation":"

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

" }, "LaunchTemplateErrorCode":{ "type":"string", @@ -27889,7 +28723,7 @@ "members":{ "AssociateCarrierIpAddress":{ "shape":"Boolean", - "documentation":"

Indicates whether to associate a Carrier IP address with eth0 for a new network interface.

Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. For more information about Carrier IP addresses, see Carrier IP addresses in the AWS Wavelength Developer Guide.

", + "documentation":"

Indicates whether to associate a Carrier IP address with eth0 for a new network interface.

Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. For more information about Carrier IP addresses, see Carrier IP addresses in the Wavelength Developer Guide.

", "locationName":"associateCarrierIpAddress" }, "AssociatePublicIpAddress":{ @@ -27961,6 +28795,26 @@ "shape":"Integer", "documentation":"

The index of the network card.

", "locationName":"networkCardIndex" + }, + "Ipv4Prefixes":{ + "shape":"Ipv4PrefixListResponse", + "documentation":"

One or more IPv4 delegated prefixes assigned to the network interface.

", + "locationName":"ipv4PrefixSet" + }, + "Ipv4PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv4 delegated prefixes that Amazon Web Services automatically assigned to the network interface.

", + "locationName":"ipv4PrefixCount" + }, + "Ipv6Prefixes":{ + "shape":"Ipv6PrefixListResponse", + "documentation":"

One or more IPv6 delegated prefixes assigned to the network interface.

", + "locationName":"ipv6PrefixSet" + }, + "Ipv6PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv6 delegated prefixes that Amazon Web Services automatically assigned to the network interface.

", + "locationName":"ipv6PrefixCount" } }, "documentation":"

Describes a network interface.

" @@ -27977,7 +28831,7 @@ "members":{ "AssociateCarrierIpAddress":{ "shape":"Boolean", - "documentation":"

Associates a Carrier IP address with eth0 for a new network interface.

Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. For more information about Carrier IP addresses, see Carrier IP addresses in the AWS Wavelength Developer Guide.

" + "documentation":"

Associates a Carrier IP address with eth0 for a new network interface.

Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. For more information about Carrier IP addresses, see Carrier IP addresses in the Wavelength Developer Guide.

" }, "AssociatePublicIpAddress":{ "shape":"Boolean", @@ -28035,6 +28889,24 @@ "NetworkCardIndex":{ "shape":"Integer", "documentation":"

The index of the network card. Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0.

" + }, + "Ipv4Prefixes":{ + "shape":"Ipv4PrefixList", + "documentation":"

One or more IPv4 delegated prefixes to be assigned to the network interface. You cannot use this option if you use the Ipv4PrefixCount option.

", + "locationName":"Ipv4Prefix" + }, + "Ipv4PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv4 delegated prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv4Prefix option.

" + }, + "Ipv6Prefixes":{ + "shape":"Ipv6PrefixList", + "documentation":"

One or more IPv6 delegated prefixes to be assigned to the network interface. You cannot use this option if you use the Ipv6PrefixCount option.

", + "locationName":"Ipv6Prefix" + }, + "Ipv6PrefixCount":{ + "shape":"Integer", + "documentation":"

The number of IPv6 delegated prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.

" } }, "documentation":"

The parameters for a network interface.

" @@ -29276,7 +30148,7 @@ "members":{ "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

Amazon EBS does not support asymmetric CMKs.

" + "documentation":"

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

Amazon EBS does not support asymmetric KMS keys.

" }, "DryRun":{ "shape":"Boolean", @@ -29289,7 +30161,7 @@ "members":{ "KmsKeyId":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the default CMK for encryption by default.

", + "documentation":"

The Amazon Resource Name (ARN) of the default KMS key for encryption by default.

", "locationName":"kmsKeyId" } } @@ -29715,6 +30587,43 @@ } } }, + "ModifyInstanceEventWindowRequest":{ + "type":"structure", + "required":["InstanceEventWindowId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the event window.

" + }, + "InstanceEventWindowId":{ + "shape":"InstanceEventWindowId", + "documentation":"

The ID of the event window.

" + }, + "TimeRanges":{ + "shape":"InstanceEventWindowTimeRangeRequestSet", + "documentation":"

The time ranges of the event window.

", + "locationName":"TimeRange" + }, + "CronExpression":{ + "shape":"InstanceEventWindowCronExpression", + "documentation":"

The cron expression of the event window, for example, * 0-4,20-23 * * 1,5.

Constraints:

  • Only hour and day of the week values are supported.

  • For day of the week values, you can specify either integers 0 through 6, or alternative single values SUN through SAT.

  • The minute, month, and year must be specified by *.

  • The hour value must be one or a multiple range, for example, 0-4 or 0-4,20-23.

  • Each hour range must be >= 2 hours, for example, 0-2 or 20-23.

  • The event window must be >= 4 hours. The combined total time ranges in the event window must be >= 4 hours.

For more information about cron expressions, see cron on the Wikipedia website.

" + } + } + }, + "ModifyInstanceEventWindowResult":{ + "type":"structure", + "members":{ + "InstanceEventWindow":{ + "shape":"InstanceEventWindow", + "documentation":"

Information about the event window.

", + "locationName":"instanceEventWindow" + } + } + }, "ModifyInstanceMetadataOptionsRequest":{ "type":"structure", "required":["InstanceId"], @@ -30448,19 +31357,19 @@ }, "Size":{ "shape":"Integer", - "documentation":"

The target size of the volume, in GiB. The target volume size must be greater than or equal to the existing size of the volume.

The following are the supported volumes sizes for each volume type:

  • gp2 and gp3: 1-16,384

  • io1 and io2: 4-16,384

  • st1 and sc1: 125-16,384

  • standard: 1-1,024

Default: If no size is specified, the existing size is retained.

" + "documentation":"

The target size of the volume, in GiB. The target volume size must be greater than or equal to the existing size of the volume.

The following are the supported volumes sizes for each volume type:

  • gp2 and gp3: 1-16,384

  • io1 and io2: 4-16,384

  • st1 and sc1: 125-16,384

  • standard: 1-1,024

Default: The existing size is retained.

" }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The target EBS volume type of the volume. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

Default: If no type is specified, the existing type is retained.

" + "documentation":"

The target EBS volume type of the volume. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

Default: The existing type is retained.

" }, "Iops":{ "shape":"Integer", - "documentation":"

The target IOPS rate of the volume. This parameter is valid only for gp3, io1, and io2 volumes.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

  • io2: 100-64,000 IOPS

Default: If no IOPS value is specified, the existing value is retained, unless a volume type is modified that supports different values.

" + "documentation":"

The target IOPS rate of the volume. This parameter is valid only for gp3, io1, and io2 volumes.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

  • io2: 100-64,000 IOPS

Default: The existing value is retained if you keep the same volume type. If you change the volume type to io1, io2, or gp3, the default is 3,000.

" }, "Throughput":{ "shape":"Integer", - "documentation":"

The target throughput of the volume, in MiB/s. This parameter is valid only for gp3 volumes. The maximum value is 1,000.

Default: If no throughput value is specified, the existing value is retained.

Valid Range: Minimum value of 125. Maximum value of 1000.

" + "documentation":"

The target throughput of the volume, in MiB/s. This parameter is valid only for gp3 volumes. The maximum value is 1,000.

Default: The existing value is retained if the source and target volume type is gp3. Otherwise, the default value is 125.

Valid Range: Minimum value of 125. Maximum value of 1000.

" }, "MultiAttachEnabled":{ "shape":"Boolean", @@ -31249,7 +32158,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the network ACL.

", + "documentation":"

The ID of the Amazon Web Services account that owns the network ACL.

", "locationName":"ownerId" } }, @@ -31462,7 +32371,7 @@ }, "FilterInArns":{ "shape":"ArnList", - "documentation":"

The Amazon Resource Names (ARN) of the AWS resources that the path must traverse.

", + "documentation":"

The Amazon Resource Names (ARN) of the Amazon Web Services resources that the path must traverse.

", "locationName":"filterInArnSet" }, "StartDate":{ @@ -31553,22 +32462,22 @@ }, "Source":{ "shape":"String", - "documentation":"

The AWS resource that is the source of the path.

", + "documentation":"

The Amazon Web Services resource that is the source of the path.

", "locationName":"source" }, "Destination":{ "shape":"String", - "documentation":"

The AWS resource that is the destination of the path.

", + "documentation":"

The Amazon Web Services resource that is the destination of the path.

", "locationName":"destination" }, "SourceIp":{ "shape":"IpAddress", - "documentation":"

The IP address of the AWS resource that is the source of the path.

", + "documentation":"

The IP address of the Amazon Web Services resource that is the source of the path.

", "locationName":"sourceIp" }, "DestinationIp":{ "shape":"IpAddress", - "documentation":"

The IP address of the AWS resource that is the destination of the path.

", + "documentation":"

The IP address of the Amazon Web Services resource that is the destination of the path.

", "locationName":"destinationIp" }, "Protocol":{ @@ -31659,7 +32568,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The account ID of the owner of the network interface.

", + "documentation":"

The Amazon Web Services account ID of the owner of the network interface.

", "locationName":"ownerId" }, "PrivateDnsName":{ @@ -31677,9 +32586,19 @@ "documentation":"

The private IPv4 addresses associated with the network interface.

", "locationName":"privateIpAddressesSet" }, + "Ipv4Prefixes":{ + "shape":"Ipv4PrefixesList", + "documentation":"

The IPv4 Prefix Delegation prefixes that are assigned to the network interface.

", + "locationName":"ipv4PrefixSet" + }, + "Ipv6Prefixes":{ + "shape":"Ipv6PrefixesList", + "documentation":"

The IPv6 Prefix Delegation prefixes that are assigned to the network interface.

", + "locationName":"ipv6PrefixSet" + }, "RequesterId":{ "shape":"String", - "documentation":"

The alias or account ID of the principal or service that created the network interface.

", + "documentation":"

The alias or Amazon Web Services account ID of the principal or service that created the network interface.

", "locationName":"requesterId" }, "RequesterManaged":{ @@ -31791,7 +32710,7 @@ }, "InstanceOwnerId":{ "shape":"String", - "documentation":"

The account ID of the owner of the instance.

", + "documentation":"

The Amazon Web Services account ID of the owner of the instance.

", "locationName":"instanceOwnerId" }, "Status":{ @@ -31884,7 +32803,7 @@ }, "AwsAccountId":{ "shape":"String", - "documentation":"

The account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "locationName":"awsAccountId" }, "AwsService":{ @@ -33661,7 +34580,7 @@ }, "UserId":{ "shape":"String", - "documentation":"

The account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "locationName":"userId" }, "VpcId":{ @@ -34642,15 +35561,15 @@ }, "HibernationOptions":{ "shape":"LaunchTemplateHibernationOptionsRequest", - "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

" }, "MetadataOptions":{ "shape":"LaunchTemplateInstanceMetadataOptionsRequest", - "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide.

" }, "EnclaveOptions":{ "shape":"LaunchTemplateEnclaveOptionsRequest", - "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see What is AWS Nitro Enclaves? in the AWS Nitro Enclaves User Guide.

You can't enable AWS Nitro Enclaves and hibernation on the same instance.

" + "documentation":"

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.

" } }, "documentation":"

The information to include in the launch template.

" @@ -34878,12 +35797,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the account that owns the reservation.

", + "documentation":"

The ID of the Amazon Web Services account that owns the reservation.

", "locationName":"ownerId" }, "RequesterId":{ "shape":"String", - "documentation":"

The ID of the requester that launched the instances on your behalf (for example, Management Console or Auto Scaling).

", + "documentation":"

The ID of the requester that launched the instances on your behalf (for example, Amazon Web Services Management Console or Auto Scaling).

", "locationName":"requesterId" }, "ReservationId":{ @@ -35446,7 +36365,7 @@ "members":{ "KmsKeyId":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the default CMK for EBS encryption by default.

", + "documentation":"

The Amazon Resource Name (ARN) of the default KMS key for EBS encryption by default.

", "locationName":"kmsKeyId" } } @@ -35613,6 +36532,7 @@ "import-image-task", "import-snapshot-task", "instance", + "instance-event-window", "internet-gateway", "key-pair", "launch-template", @@ -35787,7 +36707,7 @@ }, "CpuOptions":{ "shape":"LaunchTemplateCpuOptions", - "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU options in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"cpuOptions" }, "CapacityReservationSpecification":{ @@ -35802,17 +36722,17 @@ }, "HibernationOptions":{ "shape":"LaunchTemplateHibernationOptions", - "documentation":"

Indicates whether an instance is configured for hibernation. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Indicates whether an instance is configured for hibernation. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"hibernationOptions" }, "MetadataOptions":{ "shape":"LaunchTemplateInstanceMetadataOptions", - "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"metadataOptions" }, "EnclaveOptions":{ "shape":"LaunchTemplateEnclaveOptions", - "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves.

", + "documentation":"

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.

", "locationName":"enclaveOptions" } }, @@ -35889,6 +36809,11 @@ } } }, + "ResultRange":{ + "type":"integer", + "max":500, + "min":20 + }, "RevokeClientVpnIngressRequest":{ "type":"structure", "required":[ @@ -36094,7 +37019,7 @@ }, "DestinationPrefixListId":{ "shape":"String", - "documentation":"

The prefix of the AWS service.

", + "documentation":"

The prefix of the Amazon Web Service.

", "locationName":"destinationPrefixListId" }, "EgressOnlyInternetGatewayId":{ @@ -36114,7 +37039,7 @@ }, "InstanceOwnerId":{ "shape":"String", - "documentation":"

The AWS account ID of the owner of the instance.

", + "documentation":"

The ID of the Amazon Web Services account that owns the instance.

", "locationName":"instanceOwnerId" }, "NatGatewayId":{ @@ -36218,7 +37143,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the route table.

", + "documentation":"

The ID of the Amazon Web Services account that owns the route table.

", "locationName":"ownerId" } }, @@ -37340,7 +38265,7 @@ }, "GroupOwnerId":{ "shape":"String", - "documentation":"

The ID of the account that owns the security group.

", + "documentation":"

The ID of the Amazon Web Services account that owns the security group.

", "locationName":"groupOwnerId" }, "IsEgress":{ @@ -37774,12 +38699,12 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the parent volume.

", + "documentation":"

The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key that was used to protect the volume encryption key for the parent volume.

", "locationName":"kmsKeyId" }, "OwnerId":{ "shape":"String", - "documentation":"

The AWS account ID of the EBS snapshot owner.

", + "documentation":"

The ID of the Amazon Web Services account that owns the EBS snapshot.

", "locationName":"ownerId" }, "Progress":{ @@ -37804,7 +38729,7 @@ }, "StateMessage":{ "shape":"String", - "documentation":"

Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by DescribeSnapshots.

", + "documentation":"

Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper Key Management Service (KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by DescribeSnapshots.

", "locationName":"statusMessage" }, "VolumeId":{ @@ -37819,12 +38744,12 @@ }, "OwnerAlias":{ "shape":"String", - "documentation":"

The AWS owner alias, from an Amazon-maintained list (amazon). This is not the user-configured AWS account alias set using the IAM console.

", + "documentation":"

The Amazon Web Services owner alias, from an Amazon-maintained list (amazon). This is not the user-configured Amazon Web Services account alias set using the IAM console.

", "locationName":"ownerAlias" }, "OutpostArn":{ "shape":"String", - "documentation":"

The ARN of the AWS Outpost on which the snapshot is stored. For more information, see EBS Local Snapshot on Outposts in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"outpostArn" }, "Tags":{ @@ -37990,7 +38915,7 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

The ARN of the AWS Outpost on which the snapshot is stored. For more information, see EBS Local Snapshot on Outposts in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"outpostArn" } }, @@ -38114,7 +39039,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The account ID of the account.

", + "documentation":"

The Amazon Web Services account ID of the account.

", "locationName":"ownerId" }, "Prefix":{ @@ -38648,7 +39573,7 @@ }, "BlockDurationMinutes":{ "shape":"Integer", - "documentation":"

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

The duration period starts as soon as your Spot Instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

You can't specify an Availability Zone group or a launch group if you specify a duration.

New accounts or accounts with no previous billing history with Amazon Web Services are not eligible for Spot Instances with a defined duration (also known as Spot blocks).

" + "documentation":"

Deprecated.

" }, "ValidUntil":{ "shape":"DateTime", @@ -38948,7 +39873,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "idempotencyToken":true } } @@ -39219,7 +40144,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the subnet.

", + "documentation":"

The ID of the Amazon Web Services account that owns the subnet.

", "locationName":"ownerId" }, "AssignIpv6AddressOnCreation":{ @@ -39301,6 +40226,62 @@ "failed" ] }, + "SubnetCidrReservation":{ + "type":"structure", + "members":{ + "SubnetCidrReservationId":{ + "shape":"SubnetCidrReservationId", + "documentation":"

The ID of the subnet CIDR reservation.

", + "locationName":"subnetCidrReservationId" + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

The ID of the subnet.

", + "locationName":"subnetId" + }, + "Cidr":{ + "shape":"String", + "documentation":"

The CIDR that has been reserved.

", + "locationName":"cidr" + }, + "ReservationType":{ + "shape":"SubnetCidrReservationType", + "documentation":"

The type of reservation.

", + "locationName":"reservationType" + }, + "OwnerId":{ + "shape":"String", + "documentation":"

The ID of the account that owns the subnet CIDR reservation.

", + "locationName":"ownerId" + }, + "Description":{ + "shape":"String", + "documentation":"

The description assigned to the subnet CIDR reservation.

", + "locationName":"description" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the subnet CIDR reservation.

", + "locationName":"tagSet" + } + }, + "documentation":"

Describes a subnet CIDR reservation.

" + }, + "SubnetCidrReservationId":{"type":"string"}, + "SubnetCidrReservationList":{ + "type":"list", + "member":{ + "shape":"SubnetCidrReservation", + "locationName":"item" + } + }, + "SubnetCidrReservationType":{ + "type":"string", + "enum":[ + "prefix", + "explicit" + ] + }, "SubnetId":{"type":"string"}, "SubnetIdStringList":{ "type":"list", @@ -39458,7 +40439,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | carrier-gateway | client-vpn-endpoint | customer-gateway | dedicated-host | dhcp-options | egress-only-internet-gateway | elastic-ip | elastic-gpu | export-image-task | export-instance-task | fleet | fpga-image | host-reservation | image| import-image-task | import-snapshot-task | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | launch-template | local-gateway-route-table-vpc-association | placement-group | prefix-list | natgateway | network-acl | network-interface | reserved-instances |route-table | security-group| snapshot | spot-fleet-request | spot-instances-request | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment | transit-gateway-multicast-domain | transit-gateway-route-table | volume |vpc | vpc-peering-connection | vpc-endpoint (for interface and gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log | vpn-connection | vpn-gateway.

To tag a resource after it has been created, see CreateTags.

", + "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | carrier-gateway | client-vpn-endpoint | customer-gateway | dedicated-host | dhcp-options | egress-only-internet-gateway | elastic-ip | elastic-gpu | export-image-task | export-instance-task | fleet | fpga-image | host-reservation | image| import-image-task | import-snapshot-task | instance | instance-event-window | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | launch-template | local-gateway-route-table-vpc-association | placement-group | prefix-list | natgateway | network-acl | network-interface | reserved-instances |route-table | security-group| snapshot | spot-fleet-request | spot-instances-request | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment | transit-gateway-multicast-domain | transit-gateway-route-table | volume |vpc | vpc-peering-connection | vpc-endpoint (for interface and gateway endpoints) | vpc-endpoint-service (for Amazon Web Services PrivateLink) | vpc-flow-log | vpn-connection | vpn-gateway.

To tag a resource after it has been created, see CreateTags.

", "locationName":"resourceType" }, "Tags":{ @@ -41656,20 +42637,22 @@ }, "UnassignIpv6AddressesRequest":{ "type":"structure", - "required":[ - "NetworkInterfaceId", - "Ipv6Addresses" - ], + "required":["NetworkInterfaceId"], "members":{ - "NetworkInterfaceId":{ - "shape":"NetworkInterfaceId", - "documentation":"

The ID of the network interface.

", - "locationName":"networkInterfaceId" - }, "Ipv6Addresses":{ "shape":"Ipv6AddressList", "documentation":"

The IPv6 addresses to unassign from the network interface.

", "locationName":"ipv6Addresses" + }, + "Ipv6Prefixes":{ + "shape":"IpPrefixList", + "documentation":"

One or more IPv6 Prefix Delegation prefixes to unassign from the network interface.

", + "locationName":"Ipv6Prefix" + }, + "NetworkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

The ID of the network interface.

", + "locationName":"networkInterfaceId" } } }, @@ -41685,15 +42668,17 @@ "shape":"Ipv6AddressList", "documentation":"

The IPv6 addresses that have been unassigned from the network interface.

", "locationName":"unassignedIpv6Addresses" + }, + "UnassignedIpv6Prefixes":{ + "shape":"IpPrefixList", + "documentation":"

The IPv6 Prefix Delegation prefixes that have been unassigned from the network interface.

", + "locationName":"unassignedIpv6PrefixSet" } } }, "UnassignPrivateIpAddressesRequest":{ "type":"structure", - "required":[ - "NetworkInterfaceId", - "PrivateIpAddresses" - ], + "required":["NetworkInterfaceId"], "members":{ "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", @@ -41704,6 +42689,11 @@ "shape":"PrivateIpAddressStringList", "documentation":"

The secondary private IP addresses to unassign from the network interface. You can specify this option multiple times to unassign more than one IP address.

", "locationName":"privateIpAddress" + }, + "Ipv4Prefixes":{ + "shape":"IpPrefixList", + "documentation":"

The IPv4 Prefix Delegation prefixes to unassign from the network interface.

", + "locationName":"Ipv4Prefix" } }, "documentation":"

Contains the parameters for UnassignPrivateIpAddresses.

" @@ -41821,7 +42811,7 @@ "locationName":"message" } }, - "documentation":"

Information about the error that occurred. For more information about errors, see Error Codes.

" + "documentation":"

Information about the error that occurred. For more information about errors, see Error codes.

" }, "UnsuccessfulItemList":{ "type":"list", @@ -42194,7 +43184,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.

", + "documentation":"

The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key that was used to protect the volume encryption key for the volume.

", "locationName":"kmsKeyId" }, "OutpostArn":{ @@ -42693,7 +43683,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the VPC.

", + "documentation":"

The ID of the Amazon Web Services account that owns the VPC.

", "locationName":"ownerId" }, "InstanceTenancy":{ @@ -43217,7 +44207,7 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The AWS account ID of the VPC owner.

", + "documentation":"

The ID of the Amazon Web Services account that owns the VPC.

", "locationName":"ownerId" }, "PeeringOptions":{ @@ -43612,6 +44602,18 @@ "type":"list", "member":{"shape":"VpnTunnelOptionsSpecification"} }, + "WeekDay":{ + "type":"string", + "enum":[ + "sunday", + "monday", + "tuesday", + "wednesday", + "thursday", + "friday", + "saturday" + ] + }, "WithdrawByoipCidrRequest":{ "type":"structure", "required":["Cidr"], diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index c9f8105ae562..4e1dd84e483c 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index 6c14c9c9c728..9d7531032513 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index 1c28b9d42a6c..ee29794b9140 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 3b26f9e210c8..d352522b8306 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index 9582dbb1c02a..fb31d62b97ee 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ 
{"shape":"LimitExceededException"}, {"shape":"UpdateInProgressException"} ], - "documentation":"

Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used in capacity provider strategies to facilitate cluster auto scaling.

Only capacity providers using an Auto Scaling group can be created. Amazon ECS tasks on AWS Fargate use the FARGATE and FARGATE_SPOT capacity providers which are already created and available to all accounts in Regions supported by AWS Fargate.

" + "documentation":"

Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used in capacity provider strategies to facilitate cluster auto scaling.

Only capacity providers using an Auto Scaling group can be created. Amazon ECS tasks on Fargate use the FARGATE and FARGATE_SPOT capacity providers which are already created and available to all accounts in Regions supported by Fargate.

" }, "CreateCluster":{ "name":"CreateCluster", @@ -43,7 +43,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action.

When you call the CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your account so that required resources in other AWS services can be managed on your behalf. However, if the IAM user that makes the call does not have permissions to create the service-linked role, it is not created. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action.

When you call the CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your account so that required resources in other Amazon Web Services services can be managed on your behalf. However, if the IAM user that makes the call does not have permissions to create the service-linked role, it is not created. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" }, "CreateService":{ "name":"CreateService", @@ -597,7 +597,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify an IAM role for your task with the taskRoleArn parameter. When you specify an IAM role for a task, its containers can then use the latest versions of the AWS CLI or SDKs to make API requests to the AWS services that are specified in the IAM policy associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify an IAM role for your task with the taskRoleArn parameter. When you specify an IAM role for a task, its containers can then use the latest versions of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the IAM policy associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" }, "RunTask":{ "name":"RunTask", @@ -834,7 +834,7 @@ {"shape":"PlatformTaskDefinitionIncompatibilityException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Updating the task placement strategies and constraints on an Amazon ECS service remains in preview and is a Beta Service as defined by and subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms apply to your participation in this preview.

Modifies the parameters of a service.

For services using the rolling update (ECS) deployment controller, the desired count, deployment configuration, network configuration, task placement constraints and strategies, or task definition used can be updated.

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, task placement constraints and strategies, and health check grace period can be updated using this API. If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created. For more information, see CreateDeployment in the AWS CodeDeploy API Reference.

For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, and health check grace period using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, you should create a new task set. For more information, see CreateTaskSet.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you do not need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy):

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

  • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

  • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

" + "documentation":"

Updating the task placement strategies and constraints on an Amazon ECS service remains in preview and is a Beta Service as defined by and subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms apply to your participation in this preview.

Modifies the parameters of a service.

For services using the rolling update (ECS) deployment controller, the desired count, deployment configuration, network configuration, task placement constraints and strategies, or task definition used can be updated.

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, task placement constraints and strategies, and health check grace period can be updated using this API. If the network configuration, platform version, or task definition need to be updated, a new CodeDeploy deployment should be created. For more information, see CreateDeployment in the CodeDeploy API Reference.

For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, and health check grace period using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, you should create a new task set. For more information, see CreateTaskSet.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you do not need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy):

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

  • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

  • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

" }, "UpdateServicePrimaryTaskSet":{ "name":"UpdateServicePrimaryTaskSet", @@ -1005,7 +1005,7 @@ }, "managedTerminationProtection":{ "shape":"ManagedTerminationProtection", - "documentation":"

The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection.

When using managed termination protection, managed scaling must also be used otherwise managed termination protection will not work.

When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see Instance Protection in the AWS Auto Scaling User Guide.

When managed termination protection is disabled, your Amazon EC2 instances are not protected from termination when the Auto Scaling group scales in.

" + "documentation":"

The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection.

When using managed termination protection, managed scaling must also be used; otherwise, managed termination protection will not work.

When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see Instance Protection in the Auto Scaling User Guide.

When managed termination protection is disabled, your Amazon EC2 instances are not protected from termination when the Auto Scaling group scales in.

" } }, "documentation":"

The details of the Auto Scaling group for the capacity provider.

" @@ -1019,7 +1019,7 @@ }, "managedTerminationProtection":{ "shape":"ManagedTerminationProtection", - "documentation":"

The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection.

When using managed termination protection, managed scaling must also be used otherwise managed termination protection will not work.

When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see Instance Protection in the AWS Auto Scaling User Guide.

When managed termination protection is disabled, your Amazon EC2 instances are not protected from termination when the Auto Scaling group scales in.

" + "documentation":"

The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection.

When using managed termination protection, managed scaling must also be used; otherwise, managed termination protection will not work.

When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see Instance Protection in the Auto Scaling User Guide.

When managed termination protection is disabled, your Amazon EC2 instances are not protected from termination when the Auto Scaling group scales in.

" } }, "documentation":"

The details of the Auto Scaling group capacity provider to update.

" @@ -1047,7 +1047,7 @@ "type":"structure", "members":{ }, - "documentation":"

Your AWS account has been blocked. For more information, contact AWS Support.

", + "documentation":"

Your Amazon Web Services account has been blocked. For more information, contact Amazon Web Services Support.

", "exception":true }, "Boolean":{"type":"boolean"}, @@ -1088,7 +1088,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } }, "documentation":"

The details of a capacity provider.

" @@ -1129,7 +1129,7 @@ "documentation":"

The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used.

" } }, - "documentation":"

The details of a capacity provider strategy. A capacity provider strategy can be set when using the RunTask or CreateCluster APIs or as the default capacity provider strategy for a cluster with the CreateCluster API.

Only capacity providers that are already associated with a cluster and have an ACTIVE or UPDATING status can be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used in a capacity provider strategy.

" + "documentation":"

The details of a capacity provider strategy. A capacity provider strategy can be set when using the RunTask or CreateCluster APIs or as the default capacity provider strategy for a cluster with the CreateCluster API.

Only capacity providers that are already associated with a cluster and have an ACTIVE or UPDATING status can be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.

To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used in a capacity provider strategy.

" }, "CapacityProviderStrategyItemBase":{ "type":"integer", @@ -1169,7 +1169,7 @@ "members":{ "clusterArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs namespace, followed by the Region of the cluster, the AWS account ID of the cluster owner, the cluster namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test.

" + "documentation":"

The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs namespace, followed by the Region of the cluster, the account ID of the cluster owner, the cluster namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test.

" }, "clusterName":{ "shape":"String", @@ -1205,7 +1205,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "settings":{ "shape":"ClusterSettings", @@ -1511,7 +1511,7 @@ }, "privileged":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" + "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks run on Fargate.

" }, "readonlyRootFilesystem":{ "shape":"BoxedBoolean", @@ -1603,11 +1603,11 @@ "members":{ "containerInstanceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

" + "documentation":"

The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

" }, "ec2InstanceId":{ "shape":"String", - "documentation":"

The ID of the container instance. For Amazon EC2 instances, this value is the Amazon EC2 instance ID. For external instances, this value is the AWS Systems Manager managed instance ID.

" + "documentation":"

The ID of the container instance. For Amazon EC2 instances, this value is the Amazon EC2 instance ID. For external instances, this value is the Amazon Web Services Systems Manager managed instance ID.

" }, "capacityProviderName":{ "shape":"String", @@ -1667,7 +1667,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } }, "documentation":"

An EC2 instance that is running the Amazon ECS agent and has been registered with a cluster.

" @@ -1795,7 +1795,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the capacity provider to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the capacity provider to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } } }, @@ -1817,7 +1817,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "settings":{ "shape":"ClusterSettings", @@ -1829,7 +1829,7 @@ }, "capacityProviders":{ "shape":"StringList", - "documentation":"

The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the CreateService or RunTask actions.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created and not already associated with another cluster. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" + "documentation":"

The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the CreateService or RunTask actions.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created and not already associated with another cluster. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.

To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" }, "defaultCapacityProviderStrategy":{ "shape":"CapacityProviderStrategy", @@ -1864,7 +1864,7 @@ }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you to perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. The load balancer name parameter must be omitted. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. The target group ARN parameter must be omitted. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + "documentation":"

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating a CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you to perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. The load balancer name parameter must be omitted. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. The target group ARN parameter must be omitted. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "serviceRegistries":{ "shape":"ServiceRegistries", @@ -1880,7 +1880,7 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The infrastructure on which to run your service. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

The FARGATE launch type runs your tasks on AWS Fargate On-Demand infrastructure.

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see AWS Fargate capacity providers in the Amazon ECS User Guide for AWS Fargate.

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster.

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.

A service can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" + "documentation":"

The infrastructure on which to run your service. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS User Guide for Fargate.

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster.

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.

A service can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", @@ -1888,7 +1888,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version that your tasks in the service are running on. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the LATEST platform version is used by default. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version that your tasks in the service are running on. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the LATEST platform version is used by default. For more information, see Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" }, "role":{ "shape":"String", @@ -1924,7 +1924,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "enableECSManagedTags":{ "shape":"Boolean", @@ -1945,7 +1945,7 @@ "members":{ "service":{ "shape":"Service", - "documentation":"

The full description of your service following the create call.

If a service is using the ECS deployment controller, the deploymentController and taskSets parameters will not be returned.

If the service is using the CODE_DEPLOY deployment controller, the deploymentController, taskSets and deployments parameters will be returned, however the deployments parameter will be an empty list.

" + "documentation":"

The full description of your service following the create call.

A service will return either a capacityProviderStrategy or launchType parameter, but not both, depending on which one was specified during creation.

If a service is using the ECS deployment controller, the deploymentController and taskSets parameters will not be returned.

If the service is using the CODE_DEPLOY deployment controller, the deploymentController, taskSets and deployments parameters will be returned, however the deployments parameter will be an empty list.

" } } }, @@ -1967,7 +1967,7 @@ }, "externalId":{ "shape":"String", - "documentation":"

An optional non-unique tag that identifies this task set in external systems. If the task set is associated with a service discovery registry, the tasks in this task set will have the ECS_TASK_SET_EXTERNAL_ID AWS Cloud Map attribute set to the provided value.

" + "documentation":"

An optional non-unique tag that identifies this task set in external systems. If the task set is associated with a service discovery registry, the tasks in this task set will have the ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute set to the provided value.

" }, "taskDefinition":{ "shape":"String", @@ -1991,7 +1991,7 @@ }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy to use for the task set.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" + "documentation":"

The capacity provider strategy to use for the task set.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" }, "platformVersion":{ "shape":"String", @@ -2007,7 +2007,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } } }, @@ -2016,7 +2016,7 @@ "members":{ "taskSet":{ "shape":"TaskSet", - "documentation":"

Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL deployment. A task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

" + "documentation":"

Information about a set of Amazon ECS tasks in either a CodeDeploy or an EXTERNAL deployment. A task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

" } } }, @@ -2215,7 +2215,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -2274,7 +2274,7 @@ "members":{ "type":{ "shape":"DeploymentControllerType", - "documentation":"

The deployment controller type to use.

There are three deployment controller types available:

ECS

The rolling update (ECS) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the DeploymentConfiguration.

CODE_DEPLOY

The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment model powered by AWS CodeDeploy, which allows you to verify a new deployment of a service before sending production traffic to it.

EXTERNAL

The external (EXTERNAL) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service.

" + "documentation":"

The deployment controller type to use.

There are three deployment controller types available:

ECS

The rolling update (ECS) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the DeploymentConfiguration.

CODE_DEPLOY

The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment model powered by CodeDeploy, which allows you to verify a new deployment of a service before sending production traffic to it.

EXTERNAL

The external (EXTERNAL) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service.

" } }, "documentation":"

The deployment controller to use for the service. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

" @@ -2309,7 +2309,7 @@ }, "containerInstance":{ "shape":"String", - "documentation":"

The container instance ID or full ARN of the container instance to deregister. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

" + "documentation":"

The container instance ID or full ARN of the container instance to deregister. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

" }, "force":{ "shape":"BoxedBoolean", @@ -2494,7 +2494,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that is applied to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that is applied to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } } }, @@ -2615,7 +2615,7 @@ "members":{ "containerInstance":{ "shape":"String", - "documentation":"

The container instance ID or full ARN of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

" + "documentation":"

The container instance ID or full ARN of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

" }, "cluster":{ "shape":"String", @@ -2762,14 +2762,14 @@ "documentation":"

The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21 GiB and the maximum supported value is 200 GiB.

" } }, - "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide for AWS Fargate.

This parameter is only supported for tasks hosted on AWS Fargate using platform version 1.4.0 or later.

" + "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide for Fargate.

This parameter is only supported for tasks hosted on Fargate using platform version 1.4.0 or later.

" }, "ExecuteCommandConfiguration":{ "type":"structure", "members":{ "kmsKeyId":{ "shape":"String", - "documentation":"

Specify an AWS Key Management Service key ID to encrypt the data between the local client and the container.

" + "documentation":"

Specify a Key Management Service key ID to encrypt the data between the local client and the container.

" }, "logging":{ "shape":"ExecuteCommandLogging", @@ -2884,11 +2884,11 @@ "members":{ "credentialsParameter":{ "shape":"String", - "documentation":"

The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or AWS Systems Manager Parameter Store parameter. The ARNs refer to the stored credentials.

" + "documentation":"

The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of a Secrets Manager secret or an SSM Parameter Store parameter. The ARNs refer to the stored credentials.

" }, "domain":{ "shape":"String", - "documentation":"

A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.

" + "documentation":"

A fully qualified domain name hosted by a Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.

" } }, "documentation":"

The authorization configuration details for Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon Elastic Container Service API Reference.

For more information and the input format, see Amazon FSx for Windows File Server Volumes in the Amazon Elastic Container Service Developer Guide.

" @@ -2948,7 +2948,7 @@ }, "options":{ "shape":"FirelensConfigurationOptionsMap", - "documentation":"

The options to use when configuring the log router. This field is optional and can be used to specify a custom configuration file or to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event. If specified, the syntax to use is \"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}. For more information, see Creating a Task Definition that Uses a FireLens Configuration in the Amazon Elastic Container Service Developer Guide.

Tasks hosted on AWS Fargate only support the file configuration file type.

" + "documentation":"

The options to use when configuring the log router. This field is optional and can be used to specify a custom configuration file or to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event. If specified, the syntax to use is \"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}. For more information, see Creating a Task Definition that Uses a FireLens Configuration in the Amazon Elastic Container Service Developer Guide.

Tasks hosted on Fargate only support the file configuration file type.

" } }, "documentation":"

The FireLens configuration for the container. This is used to specify and configure a log router for container logs. For more information, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.

" @@ -2994,7 +2994,7 @@ "documentation":"

The optional grace period within which to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You may specify between 0 and 300 seconds. The startPeriod is disabled by default.

If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.

" } }, - "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

The following describes the possible healthStatus values for a container:

  • HEALTHY-The container health check has passed successfully.

  • UNHEALTHY-The container health check has failed.

  • UNKNOWN-The container health check is being evaluated or there is no container health check defined.

The following describes the possible healthStatus values for a task. The container health check status of nonessential containers do not have an effect on the health status of a task.

  • HEALTHY-All essential containers within the task have passed their health checks.

  • UNHEALTHY-One or more essential containers have failed their health check.

  • UNKNOWN-The essential containers within the task are still having their health checks evaluated or there are no container health checks defined.

If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

The following are notes about container health check support:

  • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS Container Agent.

  • Container health checks are supported for Fargate tasks if you are using platform version 1.1.0 or greater. For more information, see AWS Fargate Platform Versions.

  • Container health checks are not supported for tasks that are part of a service that is configured to use a Classic Load Balancer.

" + "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

The following describes the possible healthStatus values for a container:

  • HEALTHY-The container health check has passed successfully.

  • UNHEALTHY-The container health check has failed.

  • UNKNOWN-The container health check is being evaluated or there is no container health check defined.

The following describes the possible healthStatus values for a task. The container health check status of nonessential containers do not have an effect on the health status of a task.

  • HEALTHY-All essential containers within the task have passed their health checks.

  • UNHEALTHY-One or more essential containers have failed their health check.

  • UNKNOWN-The essential containers within the task are still having their health checks evaluated or there are no container health checks defined.

If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

The following are notes about container health check support:

  • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS Container Agent.

  • Container health checks are supported for Fargate tasks if you are using platform version 1.1.0 or greater. For more information, see Fargate Platform Versions.

  • Container health checks are not supported for tasks that are part of a service that is configured to use a Classic Load Balancer.

" }, "HealthStatus":{ "type":"string", @@ -3097,7 +3097,7 @@ "members":{ "add":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

Tasks launched on AWS Fargate only support adding the SYS_PTRACE kernel capability.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

Tasks launched on Fargate only support adding the SYS_PTRACE kernel capability.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" }, "drop":{ "shape":"StringList", @@ -3533,7 +3533,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container.

For tasks on AWS Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs log driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we do not currently provide support for running modified copies of this software.

" + "documentation":"

The log driver to use for the container.

For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs log driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we do not currently provide support for running modified copies of this software.

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -3544,7 +3544,7 @@ "documentation":"

The secrets to pass to the log configuration. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run .

By default, containers use the same logging driver that the Docker daemon uses; however the container may use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

The following should be noted when specifying a log configuration for your containers:

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the valid values below). Additional log drivers may be available in future releases of the Amazon ECS container agent.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks on AWS Fargate, because you do not have access to the underlying infrastructure your tasks are hosted on, any additional software needed will have to be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.

" + "documentation":"

The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run .

By default, containers use the same logging driver that the Docker daemon uses; however, the container may use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

The following should be noted when specifying a log configuration for your containers:

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the valid values below). Additional log drivers may be available in future releases of the Amazon ECS container agent.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks on Fargate, because you do not have access to the underlying infrastructure your tasks are hosted on, any additional software needed will have to be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.

" }, "LogConfigurationOptionsMap":{ "type":"map", @@ -4031,11 +4031,11 @@ }, "capacityProviders":{ "shape":"StringList", - "documentation":"

The name of one or more capacity providers to associate with the cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

" + "documentation":"

The name of one or more capacity providers to associate with the cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

" }, "defaultCapacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy to use by default for the cluster.

When creating a service or running a task on a cluster, if no capacity provider or launch type is specified then the default capacity provider strategy for the cluster is used.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

" + "documentation":"

The capacity provider strategy to use by default for the cluster.

When creating a service or running a task on a cluster, if no capacity provider or launch type is specified then the default capacity provider strategy for the cluster is used.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

" } } }, @@ -4085,7 +4085,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } } }, @@ -4115,11 +4115,11 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" }, "networkMode":{ "shape":"NetworkMode", - "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 instances, any network mode can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with the ecs-init package, or AWS Fargate infrastructure support the awsvpc network mode.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode. If you use the console to register a task definition with Windows containers, you must choose the <default> network mode object.

For more information, see Network settings in the Docker run reference.

" + "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default> or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

For more information, see Network settings in the Docker run reference.

" }, "containerDefinitions":{ "shape":"ContainerDefinitions", @@ -4147,15 +4147,15 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "pidMode":{ "shape":"PidMode", - "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks run on Fargate.

" }, "ipcMode":{ "shape":"IpcMode", - "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" + "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks run on Fargate.

" }, "proxyConfiguration":{ "shape":"ProxyConfiguration", @@ -4167,7 +4167,7 @@ }, "ephemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide for AWS Fargate.

This parameter is only supported for tasks hosted on AWS Fargate using platform version 1.4.0 or later.

" + "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide for Fargate.

This parameter is only supported for tasks hosted on Fargate using platform version 1.4.0 or later.

" } } }, @@ -4190,7 +4190,7 @@ "members":{ "credentialsParameter":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the secret containing the private repository credentials.

When you are using the Amazon ECS API, AWS CLI, or AWS SDK, if the secret exists in the same Region as the task that you are launching then you can use either the full ARN or the name of the secret. When you are using the AWS Management Console, you must specify the full ARN of the secret.

" + "documentation":"

The Amazon Resource Name (ARN) of the secret containing the private repository credentials.

When you are using the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same Region as the task that you are launching then you can use either the full ARN or the name of the secret. When you are using the Amazon Web Services Management Console, you must specify the full ARN of the secret.

" } }, "documentation":"

The repository credentials for private registry authentication.

" @@ -4282,7 +4282,7 @@ "members":{ "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy to use for the task.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

" + "documentation":"

The capacity provider strategy to use for the task.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

When you use cluster auto scaling, you must specify capacityProviderStrategy and not launchType.

" }, "cluster":{ "shape":"String", @@ -4306,7 +4306,7 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The infrastructure on which to run your standalone task. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

The FARGATE launch type runs your tasks on AWS Fargate On-Demand infrastructure.

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see AWS Fargate capacity providers in the Amazon ECS User Guide for AWS Fargate.

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster.

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.

A task can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" + "documentation":"

The infrastructure on which to run your standalone task. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS User Guide for Fargate.

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster.

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.

A task can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

When you use cluster auto scaling, you must specify capacityProviderStrategy and not launchType.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -4326,7 +4326,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version the task should run. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version the task should run. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "propagateTags":{ "shape":"PropagateTags", @@ -4342,7 +4342,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "taskDefinition":{ "shape":"String", @@ -4408,7 +4408,7 @@ }, "valueFrom":{ "shape":"String", - "documentation":"

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the task you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" + "documentation":"

The secret to expose to the container. The supported values are either the full ARN of the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.

If the SSM Parameter Store parameter exists in the same Region as the task you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" } }, "documentation":"

An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:

  • To inject sensitive data into your containers as environment variables, use the secrets container definition parameter.

  • To reference sensitive information in the log configuration of a container, use the secretOptions container definition parameter.

For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" @@ -4435,7 +4435,7 @@ "members":{ "serviceArn":{ "shape":"String", - "documentation":"

The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the Region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.

" + "documentation":"

The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the Region of the service, the account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.

" }, "serviceName":{ "shape":"String", @@ -4471,15 +4471,15 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The infrastructure on which your service is running. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type the service is using. When using the DescribeServices API, this field is omitted if the service was created using a capacity provider strategy.

" }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy associated with the service.

" + "documentation":"

The capacity provider strategy the service is using. When using the DescribeServices API, this field is omitted if the service was created using a launch type.

" }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which to run your service. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which to run your service. A platform version is only specified for tasks hosted on Fargate. If one is not specified, the LATEST platform version is used by default. For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "taskDefinition":{ "shape":"String", @@ -4491,7 +4491,7 @@ }, "taskSets":{ "shape":"TaskSets", - "documentation":"

Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

" + "documentation":"

Information about a set of Amazon ECS tasks in either a CodeDeploy or an EXTERNAL deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

" }, "deployments":{ "shape":"Deployments", @@ -4535,7 +4535,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "createdBy":{ "shape":"String", @@ -4609,7 +4609,7 @@ "members":{ "registryArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is AWS Cloud Map. For more information, see CreateService.

" + "documentation":"

The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is Cloud Map. For more information, see CreateService.

" }, "port":{ "shape":"BoxedInteger", @@ -4743,7 +4743,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "taskDefinition":{ "shape":"String", @@ -4958,7 +4958,7 @@ "documentation":"

The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).

" } }, - "documentation":"

The metadata that you apply to a resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to a resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "TagKey":{ "type":"string", @@ -4983,7 +4983,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } } }, @@ -5111,7 +5111,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which your task is running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which your task is running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "pullStartedAt":{ "shape":"Timestamp", @@ -5147,7 +5147,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" }, "taskArn":{ "shape":"String", @@ -5185,15 +5185,15 @@ }, "taskRoleArn":{ "shape":"String", - "documentation":"

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM roles for tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the task permission to call Amazon Web Services APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM roles for tasks in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" }, "networkMode":{ "shape":"NetworkMode", - "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 instances, any network mode can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with the ecs-init package, or AWS Fargate infrastructure support the awsvpc network mode.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode. If you use the console to register a task definition with Windows containers, you must choose the <default> network mode object.

For more information, see Network settings in the Docker run reference.

" + "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default> or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

For more information, see Network settings in the Docker run reference.

" }, "revision":{ "shape":"Integer", @@ -5201,7 +5201,7 @@ }, "volumes":{ "shape":"VolumeList", - "documentation":"

The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the Amazon Elastic Container Service Developer Guide.

The host and sourcePath parameters are not supported for tasks run on AWS Fargate.

" + "documentation":"

The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the Amazon Elastic Container Service Developer Guide.

The host and sourcePath parameters are not supported for tasks run on Fargate.

" }, "status":{ "shape":"TaskDefinitionStatus", @@ -5209,11 +5209,11 @@ }, "requiresAttributes":{ "shape":"RequiresAttributes", - "documentation":"

The container instance attributes required by your task. When an Amazon EC2 instance is registered to your cluster, the Amazon ECS container agent assigns some standard attributes to the instance. You can apply custom attributes, specified as key-value pairs using the Amazon ECS console or the PutAttributes API. These attributes are used when considering task placement for tasks hosted on Amazon EC2 instances. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for tasks run on AWS Fargate.

" + "documentation":"

The container instance attributes required by your task. When an Amazon EC2 instance is registered to your cluster, the Amazon ECS container agent assigns some standard attributes to the instance. You can apply custom attributes, specified as key-value pairs using the Amazon ECS console or the PutAttributes API. These attributes are used when considering task placement for tasks hosted on Amazon EC2 instances. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for tasks run on Fargate.

" }, "placementConstraints":{ "shape":"TaskDefinitionPlacementConstraints", - "documentation":"

An array of placement constraint objects to use for tasks.

This parameter is not supported for tasks run on AWS Fargate.

" + "documentation":"

An array of placement constraint objects to use for tasks.

This parameter is not supported for tasks run on Fargate.

" }, "compatibilities":{ "shape":"CompatibilityList", @@ -5229,7 +5229,7 @@ }, "memory":{ "shape":"String", - "documentation":"

The amount (in MiB) of memory used by the task.

If your tasks will be run on Amazon EC2 instances, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see ContainerDefinition.

If your tasks will be run on AWS Fargate, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

" + "documentation":"

The amount (in MiB) of memory used by the task.

If your tasks will be run on Amazon EC2 instances, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see ContainerDefinition.

If your tasks will be run on Fargate, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

" }, "inferenceAccelerators":{ "shape":"InferenceAccelerators", @@ -5237,11 +5237,11 @@ }, "pidMode":{ "shape":"PidMode", - "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks run on Fargate.

" }, "ipcMode":{ "shape":"IpcMode", - "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" + "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

  • For tasks that use the host IPC mode, IPC namespace related systemControls are not supported.

  • For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task.

This parameter is not supported for Windows containers or tasks run on Fargate.

" }, "proxyConfiguration":{ "shape":"ProxyConfiguration", @@ -5294,7 +5294,7 @@ "documentation":"

A cluster query language expression to apply to the constraint. For more information, see Cluster query language in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

An object representing a constraint on task placement in the task definition. For more information, see Task placement constraints in the Amazon Elastic Container Service Developer Guide.

Task placement constraints are not supported for tasks run on AWS Fargate.

" + "documentation":"

An object representing a constraint on task placement in the task definition. For more information, see Task placement constraints in the Amazon Elastic Container Service Developer Guide.

Task placement constraints are not supported for tasks run on Fargate.

" }, "TaskDefinitionPlacementConstraintType":{ "type":"string", @@ -5348,7 +5348,7 @@ }, "ephemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

The ephemeral storage setting override for the task.

This parameter is only supported for tasks hosted on AWS Fargate using platform version 1.4.0 or later.

" + "documentation":"

The ephemeral storage setting override for the task.

This parameter is only supported for tasks hosted on Fargate using platform version 1.4.0 or later.

" } }, "documentation":"

The overrides associated with a task.

" @@ -5374,11 +5374,11 @@ }, "startedBy":{ "shape":"String", - "documentation":"

The tag specified when a task set is started. If the task set is created by an AWS CodeDeploy deployment, the startedBy parameter is CODE_DEPLOY. For a task set created for an external deployment, the startedBy field isn't used.

" + "documentation":"

The tag specified when a task set is started. If the task set is created by a CodeDeploy deployment, the startedBy parameter is CODE_DEPLOY. For a task set created for an external deployment, the startedBy field isn't used.

" }, "externalId":{ "shape":"String", - "documentation":"

The external ID associated with the task set.

If a task set is created by an AWS CodeDeploy deployment, the externalId parameter contains the AWS CodeDeploy deployment ID.

If a task set is created for an external deployment and is associated with a service discovery registry, the externalId parameter contains the ECS_TASK_SET_EXTERNAL_ID AWS Cloud Map attribute.

" + "documentation":"

The external ID associated with the task set.

If a task set is created by an CodeDeploy deployment, the externalId parameter contains the CodeDeploy deployment ID.

If a task set is created for an external deployment and is associated with a service discovery registry, the externalId parameter contains the ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute.

" }, "status":{ "shape":"String", @@ -5418,7 +5418,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The AWS Fargate platform version on which the tasks in the task set are running. A platform version is only specified for tasks run on AWS Fargate. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The Fargate platform version on which the tasks in the task set are running. A platform version is only specified for tasks run on Fargate. For more information, see Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -5446,10 +5446,10 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" + "documentation":"

The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } }, - "documentation":"

Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

" + "documentation":"

Information about a set of Amazon ECS tasks in either a CodeDeploy or an EXTERNAL deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

" }, "TaskSetField":{ "type":"string", @@ -5788,7 +5788,7 @@ }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy to update the service to use.

If the service is using the default capacity provider strategy for the cluster, the service can be updated to use one or more capacity providers as opposed to the default capacity provider strategy. However, when a service is using a capacity provider strategy that is not the default capacity provider strategy, the service cannot be updated to use the cluster's default capacity provider strategy.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" + "documentation":"

The capacity provider strategy to update the service to use.

If the service is using the default capacity provider strategy for the cluster, the service can be updated to use one or more capacity providers as opposed to the default capacity provider strategy. However, when a service is using a capacity provider strategy that is not the default capacity provider strategy, the service cannot be updated to use the cluster's default capacity provider strategy.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" }, "deploymentConfiguration":{ "shape":"DeploymentConfiguration", @@ -5808,7 +5808,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If a platform version is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If a platform version is not specified, the LATEST platform version is used by default. For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "forceNewDeployment":{ "shape":"Boolean", @@ -5900,7 +5900,7 @@ }, "dockerVolumeConfiguration":{ "shape":"DockerVolumeConfiguration", - "documentation":"

This parameter is specified when you are using Docker volumes.

Windows containers only support the use of the local driver. To use bind mounts, specify the host parameter instead.

Docker volumes are not supported by tasks run on AWS Fargate.

" + "documentation":"

This parameter is specified when you are using Docker volumes.

Windows containers only support the use of the local driver. To use bind mounts, specify the host parameter instead.

Docker volumes are not supported by tasks run on Fargate.

" }, "efsVolumeConfiguration":{ "shape":"EFSVolumeConfiguration", @@ -5936,5 +5936,5 @@ "member":{"shape":"Volume"} } }, - "documentation":"Amazon Elastic Container Service

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster. You can host your cluster on a serverless infrastructure that is managed by Amazon ECS by launching your services or tasks on AWS Fargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage.

Amazon ECS makes it easy to launch and stop container-based applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features.

You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

" + "documentation":"Amazon Elastic Container Service

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster. You can host your cluster on a serverless infrastructure that is managed by Amazon ECS by launching your services or tasks on Fargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage.

Amazon ECS makes it easy to launch and stop container-based applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features.

You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

" } diff --git a/services/efs/pom.xml b/services/efs/pom.xml index 7ed8790aaa89..4219eae4e18a 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/eks/pom.xml b/services/eks/pom.xml index f0458b760a93..6b0afc52a6db 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index 8870c7090243..6c885e71e655 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index 6916bb6ed51d..1796d82d444e 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index 4bca62cae19b..c029874ba088 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 20a791ee5888..f0e80b35d4a2 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load 
Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index 41101eaec03f..e219be477e4e 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json index 343366a96294..64b595f15106 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json @@ -2095,7 +2095,7 @@ "members":{ "Key":{ "shape":"LoadBalancerAttributeKey", - "documentation":"

The name of the attribute.

The following attribute is supported by all load balancers:

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.

  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.

  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.

The following attributes are supported by only Application Load Balancers:

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.

  • routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive.

  • routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false.

  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value is true or false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.

  • waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The value is true or false. The default is false.

The following attribute is supported by Network Load Balancers and Gateway Load Balancers:

  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The value is true or false. The default is false.

" + "documentation":"

The name of the attribute.

The following attribute is supported by all load balancers:

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.

  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.

  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.

The following attributes are supported by only Application Load Balancers:

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.

  • routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive.

  • routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false.

  • routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false.

  • routing.http.xff_client_port.enabled - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false.

  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.

  • waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false.

The following attribute is supported by Network Load Balancers and Gateway Load Balancers:

  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default is false.

" }, "Value":{ "shape":"LoadBalancerAttributeValue", diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 2f4a60ae477b..5d160fba99f7 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index cb5635b5fa88..197587df8239 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index 9be4a8626985..b32380f69931 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/paginators-1.json b/services/emr/src/main/resources/codegen-resources/paginators-1.json index d5b5407b3464..5ea61f92522c 100644 --- a/services/emr/src/main/resources/codegen-resources/paginators-1.json +++ b/services/emr/src/main/resources/codegen-resources/paginators-1.json @@ -33,6 +33,11 @@ "output_token": "Marker", "result_key": "NotebookExecutions" }, + "ListReleaseLabels": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "ListSecurityConfigurations": { "input_token": "Marker", "output_token": "Marker", diff --git a/services/emr/src/main/resources/codegen-resources/service-2.json b/services/emr/src/main/resources/codegen-resources/service-2.json index e905589f1ff3..640b4429dfa1 100644 --- a/services/emr/src/main/resources/codegen-resources/service-2.json +++ 
b/services/emr/src/main/resources/codegen-resources/service-2.json @@ -79,7 +79,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a PENDING state.

" + "documentation":"

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. When you use Amazon EMR versions 5.28.0 and later, you can cancel steps that are in a PENDING or RUNNING state. In earlier versions of Amazon EMR, you can only cancel steps that are in a PENDING state.

" }, "CreateSecurityConfiguration":{ "name":"CreateSecurityConfiguration", @@ -204,6 +204,20 @@ ], "documentation":"

Provides details of a notebook execution.

" }, + "DescribeReleaseLabel":{ + "name":"DescribeReleaseLabel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReleaseLabelInput"}, + "output":{"shape":"DescribeReleaseLabelOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Provides EMR release label details, such as releases available in the region where the API request is run, and the available applications for a specific EMR release label. Can also list EMR release versions that support a specified version of Spark.

" + }, "DescribeSecurityConfiguration":{ "name":"DescribeSecurityConfiguration", "http":{ @@ -258,7 +272,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

" + "documentation":"

Returns the Amazon EMR block public access configuration for your account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

" }, "GetManagedScalingPolicy":{ "name":"GetManagedScalingPolicy", @@ -310,7 +324,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.

" + "documentation":"

Provides the status of all clusters visible to this account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters in unsorted order per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.

" }, "ListInstanceFleets":{ "name":"ListInstanceFleets", @@ -368,6 +382,20 @@ ], "documentation":"

Provides summaries of all notebook executions. You can filter the list based on multiple criteria such as status, time range, and editor id. Returns a maximum of 50 notebook executions and a marker to track the paging of a longer notebook execution list across multiple ListNotebookExecution calls.

" }, + "ListReleaseLabels":{ + "name":"ListReleaseLabels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReleaseLabelsInput"}, + "output":{"shape":"ListReleaseLabelsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Retrieves release labels of EMR services in the region where the API is called.

" + }, "ListSecurityConfigurations":{ "name":"ListSecurityConfigurations", "http":{ @@ -394,7 +422,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request of filter by StepStates. You can specify a maximum of 10 stepIDs.

" + "documentation":"

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request or filter by StepStates. You can specify a maximum of 10 stepIDs. The CLI automatically paginates results to return a list greater than 50 steps. To return more than 50 steps using the CLI, specify a Marker, which is a pagination token that indicates the next set of steps to retrieve.

" }, "ListStudioSessionMappings":{ "name":"ListStudioSessionMappings", @@ -422,7 +450,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns a list of all Amazon EMR Studios associated with the AWS account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

" + "documentation":"

Returns a list of all Amazon EMR Studios associated with the account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

" }, "ModifyCluster":{ "name":"ModifyCluster", @@ -485,7 +513,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

" + "documentation":"

Creates or updates an Amazon EMR block public access configuration for your account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

" }, "PutManagedScalingPolicy":{ "name":"PutManagedScalingPolicy", @@ -566,7 +594,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster is visible to all IAM users of the AWS account associated with the cluster. Only the IAM user who created the cluster or the AWS account root user can call this action. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If set to false, only the IAM user that created the cluster can perform actions. This action works on running clusters. You can override the default true setting when you create a cluster by using the VisibleToAllUsers parameter with RunJobFlow.

" + "documentation":"

Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true, IAM principals in the account can perform EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.

This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.

For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

" }, "StartNotebookExecution":{ "name":"StartNotebookExecution", @@ -926,7 +954,7 @@ "documentation":"

The Amazon Resource Name that created or last modified the configuration.

" } }, - "documentation":"

Properties that describe the AWS principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

" + "documentation":"

Properties that describe the Amazon Web Services principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

" }, "Boolean":{"type":"boolean"}, "BooleanObject":{"type":"boolean"}, @@ -1104,7 +1132,7 @@ }, "LogEncryptionKmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + "documentation":"

The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" }, "RequestedAmiVersion":{ "shape":"String", @@ -1128,7 +1156,7 @@ }, "VisibleToAllUsers":{ "shape":"Boolean", - "documentation":"

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true when you create a cluster by using the VisibleToAllUsers parameter of the RunJobFlow action.

" + "documentation":"

Indicates whether the cluster is visible to IAM principals in the account associated with the cluster. When true, IAM principals in the account can perform EMR cluster actions on the cluster that their IAM policies allow. When false, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is false if a value is not provided when creating a cluster using the EMR API RunJobFlow command or the CLI create-cluster command. The default value is true when a cluster is created using the Management Console. IAM principals that are allowed to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

" }, "Applications":{ "shape":"ApplicationList", @@ -1140,7 +1168,7 @@ }, "ServiceRole":{ "shape":"String", - "documentation":"

The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

" + "documentation":"

The IAM role that will be assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.

" }, "NormalizedInstanceHours":{ "shape":"Integer", @@ -1477,7 +1505,7 @@ }, "ServiceRole":{ "shape":"XmlString", - "documentation":"

The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a way for Amazon EMR Studio to interoperate with other AWS services.

" + "documentation":"

The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a way for Amazon EMR Studio to interoperate with other Amazon Web Services services.

" }, "UserRole":{ "shape":"XmlString", @@ -1528,11 +1556,11 @@ }, "IdentityId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityType":{ "shape":"IdentityType", @@ -1540,7 +1568,7 @@ }, "SessionPolicyArn":{ "shape":"XmlStringMaxLen256", - "documentation":"

The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. Session policies refine Studio user permissions without the need to use multiple IAM user roles.

" + "documentation":"

The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. You should specify the ARN for the session policy that you want to apply, not the ARN of your user role. For more information, see Create an EMR Studio User Role with Session Policies.

" } } }, @@ -1583,11 +1611,11 @@ }, "IdentityId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user name or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The name of the user name or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityType":{ "shape":"IdentityType", @@ -1667,6 +1695,40 @@ } } }, + "DescribeReleaseLabelInput":{ + "type":"structure", + "members":{ + "ReleaseLabel":{ + "shape":"String", + "documentation":"

The target release label to be described.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The pagination token. Reserved for future use. Currently set to null.

" + }, + "MaxResults":{ + "shape":"MaxResultsNumber", + "documentation":"

Reserved for future use. Currently set to null.

" + } + } + }, + "DescribeReleaseLabelOutput":{ + "type":"structure", + "members":{ + "ReleaseLabel":{ + "shape":"String", + "documentation":"

The target release label described in the response.

" + }, + "Applications":{ + "shape":"SimplifiedApplicationList", + "documentation":"

The list of applications available for the target release label. Name is the name of the application. Version is the concise version of the application.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The pagination token. Reserved for future use. Currently set to null.

" + } + } + }, "DescribeSecurityConfigurationInput":{ "type":"structure", "required":["Name"], @@ -1933,7 +1995,7 @@ }, "BlockPublicAccessConfigurationMetadata":{ "shape":"BlockPublicAccessConfigurationMetadata", - "documentation":"

Properties that describe the AWS principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

" + "documentation":"

Properties that describe the Amazon Web Services principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

" } } }, @@ -1969,11 +2031,11 @@ }, "IdentityId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The name of the user or group to fetch. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityType":{ "shape":"IdentityType", @@ -2091,7 +2153,7 @@ }, "EbsVolumes":{ "shape":"EbsVolumeList", - "documentation":"

The list of EBS volumes that are attached to this instance.

" + "documentation":"

The list of Amazon EBS volumes that are attached to this instance.

" } }, "documentation":"

Represents an EC2 instance provisioned as part of cluster.

" @@ -2140,7 +2202,7 @@ }, "InstanceTypeSpecifications":{ "shape":"InstanceTypeSpecificationList", - "documentation":"

The specification for the instance types that comprise an instance fleet. Up to five unique instance specifications may be defined for each instance fleet.

" + "documentation":"

An array of specifications for the instance types that comprise an instance fleet.

" }, "LaunchSpecifications":{ "shape":"InstanceFleetProvisioningSpecifications", @@ -2342,7 +2404,7 @@ }, "Configurations":{ "shape":"ConfigurationList", - "documentation":"

Amazon EMR releases 4.x or later.

The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

" + "documentation":"

Amazon EMR releases 4.x or later.

The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

" }, "ConfigurationsVersion":{ "shape":"Long", @@ -2761,14 +2823,14 @@ }, "EbsConfiguration":{ "shape":"EbsConfiguration", - "documentation":"

The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance as defined by InstanceType.

" + "documentation":"

The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by InstanceType.

" }, "Configurations":{ "shape":"ConfigurationList", "documentation":"

A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.

" } }, - "documentation":"

An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. There can be a maximum of five instance type configurations in a fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" + "documentation":"

An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. When you use an allocation strategy, you can include a maximum of 30 instance type configurations for a fleet. For more information about how to use an allocation strategy, see Configure Instance Fleets. Without an allocation strategy, you may specify a maximum of five instance type configurations for a fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" }, "InstanceTypeConfigList":{ "type":"list", @@ -2799,7 +2861,7 @@ }, "EbsBlockDevices":{ "shape":"EbsBlockDeviceList", - "documentation":"

The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance as defined by InstanceType.

" + "documentation":"

The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by InstanceType.

" }, "EbsOptimized":{ "shape":"BooleanObject", @@ -2870,7 +2932,7 @@ }, "LogEncryptionKmsKeyId":{ "shape":"XmlString", - "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + "documentation":"

The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" }, "AmiVersion":{ "shape":"XmlStringMaxLen256", @@ -2898,7 +2960,7 @@ }, "VisibleToAllUsers":{ "shape":"Boolean", - "documentation":"

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true when you create a cluster by using the VisibleToAllUsers parameter of the RunJobFlow action.

" + "documentation":"

Indicates whether the cluster is visible to IAM principals in the account associated with the cluster. When true, IAM principals in the account can perform EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is false if a value is not provided when creating a cluster using the EMR API RunJobFlow command or the CLI create-cluster command. The default value is true when a cluster is created using the Management Console. IAM principals that are authorized to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

" }, "JobFlowRole":{ "shape":"XmlString", @@ -2906,7 +2968,7 @@ }, "ServiceRole":{ "shape":"XmlString", - "documentation":"

The IAM role that is assumed by the Amazon EMR service to access AWS resources on your behalf.

" + "documentation":"

The IAM role that is assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.

" }, "AutoScalingRole":{ "shape":"XmlString", @@ -3008,7 +3070,7 @@ }, "KeepJobFlowAliveWhenNoSteps":{ "shape":"Boolean", - "documentation":"

Specifies whether the cluster should remain available after completing all steps.

" + "documentation":"

Specifies whether the cluster should remain available after completing all steps. Defaults to true. For more information about configuring cluster termination, see Control Cluster Termination in the EMR Management Guide.

" }, "TerminationProtected":{ "shape":"Boolean", @@ -3028,11 +3090,11 @@ }, "EmrManagedMasterSecurityGroup":{ "shape":"XmlStringMaxLen256", - "documentation":"

The identifier of the Amazon EC2 security group for the master node.

" + "documentation":"

The identifier of the Amazon EC2 security group for the master node. If you specify EmrManagedMasterSecurityGroup, you must also specify EmrManagedSlaveSecurityGroup.

" }, "EmrManagedSlaveSecurityGroup":{ "shape":"XmlStringMaxLen256", - "documentation":"

The identifier of the Amazon EC2 security group for the core and task nodes.

" + "documentation":"

The identifier of the Amazon EC2 security group for the core and task nodes. If you specify EmrManagedSlaveSecurityGroup, you must also specify EmrManagedMasterSecurityGroup.

" }, "ServiceAccessSecurityGroup":{ "shape":"XmlStringMaxLen256", @@ -3202,7 +3264,7 @@ }, "ClusterStates":{ "shape":"ClusterStateList", - "documentation":"

The cluster state filters to apply when listing clusters.

" + "documentation":"

The cluster state filters to apply when listing clusters. Clusters that change state while this action runs may not be returned as expected in the list of clusters.

" }, "Marker":{ "shape":"Marker", @@ -3368,6 +3430,36 @@ } } }, + "ListReleaseLabelsInput":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"ReleaseLabelFilter", + "documentation":"

Filters the results of the request. Prefix specifies the prefix of release labels to return. Application specifies the application (with/without version) of release labels to return.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Specifies the next page of results. If NextToken is not specified, which is usually the case for the first request of ListReleaseLabels, the first page of results are determined by other filtering parameters or by the latest version. The ListReleaseLabels request fails if the identity (AWS AccountID) and all filtering parameters are different from the original request, or if the NextToken is expired or tampered with.

" + }, + "MaxResults":{ + "shape":"MaxResultsNumber", + "documentation":"

Defines the maximum number of release labels to return in a single response. The default is 100.

" + } + } + }, + "ListReleaseLabelsOutput":{ + "type":"structure", + "members":{ + "ReleaseLabels":{ + "shape":"StringList", + "documentation":"

The returned release labels.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Used to paginate the next page of results if specified in the next ListReleaseLabels request.

" + } + } + }, "ListSecurityConfigurationsInput":{ "type":"structure", "members":{ @@ -3408,7 +3500,7 @@ }, "Marker":{ "shape":"Marker", - "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + "documentation":"

The maximum number of steps that a single ListSteps action returns is 50. To return a longer list of steps, use multiple ListSteps actions along with the Marker parameter, which is a pagination token that indicates the next set of results to retrieve.

" } }, "documentation":"

This input determines which steps to list.

" @@ -3422,7 +3514,7 @@ }, "Marker":{ "shape":"Marker", - "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + "documentation":"

The maximum number of steps that a single ListSteps action returns is 50. To return a longer list of steps, use multiple ListSteps actions along with the Marker parameter, which is a pagination token that indicates the next set of results to retrieve.

" } }, "documentation":"

This output contains the list of steps returned in reverse order. This means that the last step is the first element in the list.

" @@ -3498,6 +3590,11 @@ "SPOT" ] }, + "MaxResultsNumber":{ + "type":"integer", + "max":100, + "min":1 + }, "MetricDimension":{ "type":"structure", "members":{ @@ -3526,7 +3623,7 @@ }, "StepConcurrencyLevel":{ "shape":"Integer", - "documentation":"

The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps.

" + "documentation":"

The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps. We recommend that you do not change this parameter while steps are running or the ActionOnFailure setting may not behave as expected. For more information see Step$ActionOnFailure.

" } } }, @@ -3552,7 +3649,7 @@ }, "InstanceFleet":{ "shape":"InstanceFleetModifyConfig", - "documentation":"

The unique identifier of the instance fleet.

" + "documentation":"

The configuration parameters of the instance fleet.

" } } }, @@ -3679,7 +3776,7 @@ "documentation":"

The timestamp when notebook execution started.

" } }, - "documentation":"

" + "documentation":"

Details for a notebook execution. The details include information such as the unique ID and status of the notebook execution.

" }, "NotebookExecutionSummaryList":{ "type":"list", @@ -3690,7 +3787,7 @@ "members":{ "UsageStrategy":{ "shape":"OnDemandCapacityReservationUsageStrategy", - "documentation":"

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price).

If you do not specify a value, the fleet fulfils the On-Demand capacity according to the chosen On-Demand allocation strategy.

" + "documentation":"

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price).

If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.

" }, "CapacityReservationPreference":{ "shape":"OnDemandCapacityReservationPreference", @@ -3884,6 +3981,20 @@ "members":{ } }, + "ReleaseLabelFilter":{ + "type":"structure", + "members":{ + "Prefix":{ + "shape":"String", + "documentation":"

Optional release label version prefix filter. For example, emr-5.

" + }, + "Application":{ + "shape":"String", + "documentation":"

Optional release label application filter. For example, spark@2.1.0.

" + } + }, + "documentation":"

The release label filters by application or version prefix.

" + }, "RemoveAutoScalingPolicyInput":{ "type":"structure", "required":[ @@ -3970,7 +4081,7 @@ }, "LogEncryptionKmsKeyId":{ "shape":"XmlString", - "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.

" + "documentation":"

The KMS key used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.

" }, "AdditionalInfo":{ "shape":"XmlString", @@ -4014,7 +4125,7 @@ }, "VisibleToAllUsers":{ "shape":"Boolean", - "documentation":"

A value of true indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false indicates that only the IAM user who created the cluster can perform actions.

" + "documentation":"

Set this value to true so that IAM principals in the account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to false for clusters created using the EMR API or the CLI create-cluster command.

When set to false, only the IAM principal that created the cluster and the account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

" }, "JobFlowRole":{ "shape":"XmlString", @@ -4022,7 +4133,7 @@ }, "ServiceRole":{ "shape":"XmlString", - "documentation":"

The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

" + "documentation":"

The IAM role that will be assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.

" }, "Tags":{ "shape":"TagList", @@ -4173,7 +4284,7 @@ "members":{ "Path":{ "shape":"XmlString", - "documentation":"

Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system.

" + "documentation":"

Location in Amazon S3 of the script to run during a bootstrap action.

" }, "Args":{ "shape":"XmlStringList", @@ -4217,7 +4328,7 @@ }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

" + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference.

" }, "IdentityType":{ "shape":"IdentityType", @@ -4247,11 +4358,11 @@ }, "IdentityId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store.

" + "documentation":"

The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store.

" }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

" + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference.

" }, "IdentityType":{ "shape":"IdentityType", @@ -4303,7 +4414,7 @@ }, "VisibleToAllUsers":{ "shape":"Boolean", - "documentation":"

A value of true indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false indicates that only the IAM user who created the cluster can perform actions.

" + "documentation":"

A value of true indicates that an IAM principal in the account can perform EMR actions on the cluster that the IAM policies attached to the principal allow. A value of false indicates that only the IAM principal that created the cluster and the Amazon Web Services root user can perform EMR actions on the cluster.

" } }, "documentation":"

The input to the SetVisibleToAllUsers action.

" @@ -4341,6 +4452,24 @@ }, "documentation":"

An automatic scaling configuration, which describes how the policy adds or removes instances, the cooldown period, and the number of EC2 instances that will be added each time the CloudWatch metric alarm condition is satisfied.

" }, + "SimplifiedApplication":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The returned release label application name. For example, hadoop.

" + }, + "Version":{ + "shape":"String", + "documentation":"

The returned release label application version. For example, 3.2.1.

" + } + }, + "documentation":"

The returned release label application names or versions.

" + }, + "SimplifiedApplicationList":{ + "type":"list", + "member":{"shape":"SimplifiedApplication"} + }, "SpotProvisioningAllocationStrategy":{ "type":"string", "enum":["capacity-optimized"] @@ -4457,7 +4586,7 @@ }, "ActionOnFailure":{ "shape":"ActionOnFailure", - "documentation":"

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

" + "documentation":"

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

If a cluster's StepConcurrencyLevel is greater than 1, do not use AddJobFlowSteps to submit a step with this parameter set to CANCEL_AND_WAIT or TERMINATE_CLUSTER. The step is not submitted and the action fails with a message that the ActionOnFailure setting is not valid.

If you change a cluster's StepConcurrencyLevel to be greater than 1 while a step is running, the ActionOnFailure parameter may not behave as you expect. In this case, for a step that fails with this parameter set to CANCEL_AND_WAIT, pending steps and the running step are not canceled; for a step that fails with this parameter set to TERMINATE_CLUSTER, the cluster does not terminate.

" }, "Status":{ "shape":"StepStatus", @@ -4486,14 +4615,14 @@ }, "ActionOnFailure":{ "shape":"ActionOnFailure", - "documentation":"

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

" + "documentation":"

The action to take when the step fails. Use one of the following values:

  • TERMINATE_CLUSTER - Shuts down the cluster.

  • CANCEL_AND_WAIT - Cancels any pending steps and returns the cluster to the WAITING state.

  • CONTINUE - Continues to the next step in the queue.

  • TERMINATE_JOB_FLOW - Shuts down the cluster. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

If a cluster's StepConcurrencyLevel is greater than 1, do not use AddJobFlowSteps to submit a step with this parameter set to CANCEL_AND_WAIT or TERMINATE_CLUSTER. The step is not submitted and the action fails with a message that the ActionOnFailure setting is not valid.

If you change a cluster's StepConcurrencyLevel to be greater than 1 while a step is running, the ActionOnFailure parameter may not behave as you expect. In this case, for a step that fails with this parameter set to CANCEL_AND_WAIT, pending steps and the running step are not canceled; for a step that fails with this parameter set to TERMINATE_CLUSTER, the cluster does not terminate.

" }, "HadoopJarStep":{ "shape":"HadoopJarStepConfig", "documentation":"

The JAR file used for the step.

" } }, - "documentation":"

Specification of a cluster (job flow) step.

" + "documentation":"

Specification for a cluster (job flow) step.

" }, "StepConfigList":{ "type":"list", @@ -4641,7 +4770,7 @@ }, "ActionOnFailure":{ "shape":"ActionOnFailure", - "documentation":"

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

" + "documentation":"

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility.

" }, "Status":{ "shape":"StepStatus", @@ -4915,11 +5044,11 @@ }, "IdentityId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The name of the user or group to update. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityType":{ "shape":"IdentityType", @@ -4978,5 +5107,5 @@ "member":{"shape":"XmlStringMaxLen256"} } }, - "documentation":"

Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management.

" + "documentation":"

Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several Amazon Web Services services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management.

" } diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index 08f73511a24a..1078fbfa380b 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrcontainers/src/main/resources/codegen-resources/service-2.json b/services/emrcontainers/src/main/resources/codegen-resources/service-2.json index 0db831a4b436..5929dab8e4e8 100644 --- a/services/emrcontainers/src/main/resources/codegen-resources/service-2.json +++ b/services/emrcontainers/src/main/resources/codegen-resources/service-2.json @@ -294,6 +294,12 @@ }, "documentation":"

A configuration for CloudWatch monitoring. You can configure your jobs to send log information to CloudWatch Logs.

" }, + "ClusterId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[0-9A-Za-z][A-Za-z0-9\\-_]*" + }, "Configuration":{ "type":"structure", "required":["classification"], @@ -355,7 +361,7 @@ "documentation":"

The type of the container provider. EKS is the only supported type as of now.

" }, "id":{ - "shape":"String256", + "shape":"ClusterId", "documentation":"

The ID of the container cluster.

" }, "info":{ @@ -631,7 +637,7 @@ "type":"structure", "members":{ "namespace":{ - "shape":"String256", + "shape":"KubernetesNamespace", "documentation":"

The namespaces of the EKS cluster.

" } }, @@ -696,6 +702,14 @@ "shape":"SubnetIds", "documentation":"

The subnet IDs of the endpoint.

" }, + "stateDetails":{ + "shape":"String256", + "documentation":"

Additional details of the endpoint state.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

The reasons why the endpoint has failed.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags of the endpoint.

" @@ -889,6 +903,12 @@ "type":"list", "member":{"shape":"JobRun"} }, + "KubernetesNamespace":{ + "type":"string", + "max":63, + "min":1, + "pattern":"[a-z0-9]([-a-z0-9]*[a-z0-9])?" + }, "ListJobRunsRequest":{ "type":"structure", "required":["virtualClusterId"], @@ -1207,7 +1227,7 @@ }, "SparkSubmitParameters":{ "type":"string", - "max":1024, + "max":102400, "min":1, "pattern":"(?!\\s*$)(^[^';|\\u0026\\u003C\\u003E*?`$(){}\\[\\]!#\\\\]*$)", "sensitive":true diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 2a973747c203..bb263eeab7c4 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/eventbridge/src/main/resources/codegen-resources/service-2.json b/services/eventbridge/src/main/resources/codegen-resources/service-2.json index 92e5f65a0719..955488918e0a 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/service-2.json +++ b/services/eventbridge/src/main/resources/codegen-resources/service-2.json @@ -127,7 +127,7 @@ {"shape":"LimitExceededException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

Called by an SaaS partner to create a partner event source. This operation is not used by AWS customers.

Each partner event source can be used by one AWS account to create a matching partner event bus in that AWS account. A SaaS partner must create one partner event source for each AWS account that wants to receive those event types.

A partner event source creates events based on resources within the SaaS partner's service or application.

An AWS account that creates a partner event bus that matches the partner event source can use that event bus to receive events from the partner, and then process them using AWS Events rules and targets.

Partner event source names follow this format:

partner_name/event_namespace/event_name

partner_name is determined during partner registration and identifies the partner to AWS customers. event_namespace is determined by the partner and is a way for the partner to categorize their events. event_name is determined by the partner, and should uniquely identify an event-generating resource within the partner system. The combination of event_namespace and event_name should help AWS customers decide whether to create an event bus to receive these events.

" + "documentation":"

Called by an SaaS partner to create a partner event source. This operation is not used by Amazon Web Services customers.

Each partner event source can be used by one Amazon Web Services account to create a matching partner event bus in that Amazon Web Services account. A SaaS partner must create one partner event source for each Amazon Web Services account that wants to receive those event types.

A partner event source creates events based on resources within the SaaS partner's service or application.

An Amazon Web Services account that creates a partner event bus that matches the partner event source can use that event bus to receive events from the partner, and then process them using Amazon Web Services Events rules and targets.

Partner event source names follow this format:

partner_name/event_namespace/event_name

partner_name is determined during partner registration and identifies the partner to Amazon Web Services customers. event_namespace is determined by the partner and is a way for the partner to categorize their events. event_name is determined by the partner, and should uniquely identify an event-generating resource within the partner system. The combination of event_namespace and event_name should help Amazon Web Services customers decide whether to create an event bus to receive these events.

" }, "DeactivateEventSource":{ "name":"DeactivateEventSource", @@ -143,7 +143,7 @@ {"shape":"InternalException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

You can use this operation to temporarily stop receiving events from the specified partner event source. The matching event bus is not deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it is deleted.

To activate a deactivated partner event source, use ActivateEventSource.

" + "documentation":"

You can use this operation to temporarily stop receiving events from the specified partner event source. The matching event bus is not deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it is deleted.

To activate a deactivated partner event source, use ActivateEventSource.

" }, "DeauthorizeConnection":{ "name":"DeauthorizeConnection", @@ -230,7 +230,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

This operation is used by SaaS partners to delete a partner event source. This operation is not used by AWS customers.

When you delete an event source, the status of the corresponding partner event bus in the AWS customer account becomes DELETED.

" + "documentation":"

This operation is used by SaaS partners to delete a partner event source. This operation is not used by Amazon Web Services customers.

When you delete an event source, the status of the corresponding partner event bus in the Amazon Web Services customer account becomes DELETED.

" }, "DeleteRule":{ "name":"DeleteRule", @@ -245,7 +245,7 @@ {"shape":"InternalException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

If you call delete rule multiple times for the same rule, all calls will succeed. When you call delete rule for a non-existent custom eventbus, ResourceNotFoundException is returned.

Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you are sure the other service is not still using that rule.

" + "documentation":"

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

If you call delete rule multiple times for the same rule, all calls will succeed. When you call delete rule for a non-existent custom eventbus, ResourceNotFoundException is returned.

Managed rules are rules created and managed by another Amazon Web Services service on your behalf. These rules are created by those other Amazon Web Services services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you are sure the other service is not still using that rule.

" }, "DescribeApiDestination":{ "name":"DescribeApiDestination", @@ -302,7 +302,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalException"} ], - "documentation":"

Displays details about an event bus in your account. This can include the external AWS accounts that are permitted to write events to your default event bus, and the associated policy. For custom event buses and partner event buses, it displays the name, ARN, policy, state, and creation time.

To enable your account to receive events from other accounts on its default event bus, use PutPermission.

For more information about partner event buses, see CreateEventBus.

" + "documentation":"

Displays details about an event bus in your account. This can include the external Amazon Web Services accounts that are permitted to write events to your default event bus, and the associated policy. For custom event buses and partner event buses, it displays the name, ARN, policy, state, and creation time.

To enable your account to receive events from other accounts on its default event bus, use PutPermission.

For more information about partner event buses, see CreateEventBus.

" }, "DescribeEventSource":{ "name":"DescribeEventSource", @@ -332,7 +332,7 @@ {"shape":"InternalException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

An SaaS partner can use this operation to list details about a partner event source that they have created. AWS customers do not use this operation. Instead, AWS customers can use DescribeEventSource to see details about a partner event source that is shared with them.

" + "documentation":"

An SaaS partner can use this operation to list details about a partner event source that they have created. Amazon Web Services customers do not use this operation. Instead, Amazon Web Services customers can use DescribeEventSource to see details about a partner event source that is shared with them.

" }, "DescribeReplay":{ "name":"DescribeReplay", @@ -360,7 +360,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalException"} ], - "documentation":"

Describes the specified rule.

DescribeRule does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

" + "documentation":"

Describes the specified rule.

DescribeRule does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

" }, "DisableRule":{ "name":"DisableRule", @@ -457,7 +457,7 @@ {"shape":"InternalException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

You can use this to see all the partner event sources that have been shared with your AWS account. For more information about partner event sources, see CreateEventBus.

" + "documentation":"

You can use this to see all the partner event sources that have been shared with your Amazon Web Services account. For more information about partner event sources, see CreateEventBus.

" }, "ListPartnerEventSourceAccounts":{ "name":"ListPartnerEventSourceAccounts", @@ -472,7 +472,7 @@ {"shape":"InternalException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

An SaaS partner can use this operation to display the AWS account ID that a particular partner event source name is associated with. This operation is not used by AWS customers.

" + "documentation":"

An SaaS partner can use this operation to display the Amazon Web Services account ID that a particular partner event source name is associated with. This operation is not used by Amazon Web Services customers.

" }, "ListPartnerEventSources":{ "name":"ListPartnerEventSources", @@ -486,7 +486,7 @@ {"shape":"InternalException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

An SaaS partner can use this operation to list all the partner event source names that they have created. This operation is not used by AWS customers.

" + "documentation":"

An SaaS partner can use this operation to list all the partner event source names that they have created. This operation is not used by Amazon Web Services customers.

" }, "ListReplays":{ "name":"ListReplays", @@ -527,7 +527,7 @@ {"shape":"InternalException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists your Amazon EventBridge rules. You can either list all the rules or you can provide a prefix to match to the rule names.

ListRules does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

" + "documentation":"

Lists your Amazon EventBridge rules. You can either list all the rules or you can provide a prefix to match to the rule names.

ListRules does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -582,7 +582,7 @@ {"shape":"InternalException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

This is used by SaaS partners to write events to a customer's partner event bus. AWS customers do not use this operation.

" + "documentation":"

This is used by SaaS partners to write events to a customer's partner event bus. Amazon Web Services customers do not use this operation.

" }, "PutPermission":{ "name":"PutPermission", @@ -598,7 +598,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving to an event bus in your account.

For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to your event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as \"*\" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.

" + "documentation":"

Running PutPermission permits the specified Amazon Web Services account or Amazon Web Services organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving to an event bus in your account.

For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.

To enable multiple Amazon Web Services accounts to put events to your event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same Amazon Web Services organization, you can run PutPermission once specifying Principal as \"*\" and specifying the Amazon Web Services organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

The permission policy on the event bus cannot exceed 10 KB in size.

" }, "PutRule":{ "name":"PutRule", @@ -616,7 +616,7 @@ {"shape":"InternalException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule.

A single rule watches for events from a single event bus. Events generated by AWS services go to your account's default event bus. Events generated by SaaS partner services or applications go to the matching partner event bus. If you have custom applications or services, you can specify whether their events go to your default event bus or a custom event bus that you have created. For more information, see CreateEventBus.

If you are updating an existing rule, the rule is replaced with what you specify in this PutRule command. If you omit arguments in PutRule, the old values for those arguments are not kept. Instead, they are replaced with null values.

When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.

A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.

When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the PutRule operation and assign tags, you must have both the events:PutRule and events:TagResource permissions.

If you are updating an existing rule, any tags you specify in the PutRule operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

In EventBridge, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.

To prevent this, write the rules so that the triggered actions do not re-fire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.

An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets.

" + "documentation":"

Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule.

A single rule watches for events from a single event bus. Events generated by Amazon Web Services services go to your account's default event bus. Events generated by SaaS partner services or applications go to the matching partner event bus. If you have custom applications or services, you can specify whether their events go to your default event bus or a custom event bus that you have created. For more information, see CreateEventBus.

If you are updating an existing rule, the rule is replaced with what you specify in this PutRule command. If you omit arguments in PutRule, the old values for those arguments are not kept. Instead, they are replaced with null values.

When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.

A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.

When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the PutRule operation and assign tags, you must have both the events:PutRule and events:TagResource permissions.

If you are updating an existing rule, any tags you specify in the PutRule operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.

Most services in Amazon Web Services treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

In EventBridge, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.

To prevent this, write the rules so that the triggered actions do not re-fire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.

An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets.

" }, "PutTargets":{ "name":"PutTargets", @@ -633,7 +633,7 @@ {"shape":"ManagedRuleException"}, {"shape":"InternalException"} ], - "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for Events:

  • API destination

  • Amazon API Gateway REST API endpoints

  • API Gateway

  • AWS Batch job queue

  • CloudWatch Logs group

  • CodeBuild project

  • CodePineline

  • Amazon EC2 CreateSnapshot API call

  • Amazon EC2 RebootInstances API call

  • Amazon EC2 StopInstances API call

  • Amazon EC2 TerminateInstances API call

  • Amazon ECS tasks

  • Event bus in a different AWS account or Region.

    You can use an event bus in the US East (N. Virginia) us-east-1, US West (Oregon) us-west-2, or Europe (Ireland) eu-west-1 Regions as a target for a rule.

  • Firehose delivery stream (Kinesis Data Firehose)

  • Inspector assessment template (Amazon Inspector)

  • Kinesis stream (Kinesis Data Stream)

  • AWS Lambda function

  • Redshift clusters (Data API statement execution)

  • Amazon SNS topic

  • Amazon SQS queues (includes FIFO queues

  • SSM Automation

  • SSM OpsItem

  • SSM Run Command

  • Step Functions state machines

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.

Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different AWS account.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" + "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for Events:

  • API destination

  • Amazon API Gateway REST API endpoints

  • API Gateway

  • Batch job queue

  • CloudWatch Logs group

  • CodeBuild project

  • CodePipeline

  • Amazon EC2 CreateSnapshot API call

  • EC2 Image Builder

  • Amazon EC2 RebootInstances API call

  • Amazon EC2 StopInstances API call

  • Amazon EC2 TerminateInstances API call

  • Amazon ECS tasks

  • Event bus in a different Amazon Web Services account or Region.

    You can use an event bus in the US East (N. Virginia) us-east-1, US West (Oregon) us-west-2, or Europe (Ireland) eu-west-1 Regions as a target for a rule.

  • Firehose delivery stream (Kinesis Data Firehose)

  • Inspector assessment template (Amazon Inspector)

  • Kinesis stream (Kinesis Data Stream)

  • Lambda function

  • Redshift clusters (Data API statement execution)

  • Amazon SNS topic

  • Amazon SQS queues (includes FIFO queues)

  • SSM Automation

  • SSM OpsItem

  • SSM Run Command

  • Step Functions state machines

Creating rules with built-in targets is supported only in the Amazon Web Services Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate permissions. For Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis Data Streams, Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another Amazon Web Services account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge Pricing.

Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different Amazon Web Services account.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" }, "RemovePermission":{ "name":"RemovePermission", @@ -648,7 +648,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"OperationDisabledException"} ], - "documentation":"

Revokes the permission of another AWS account to be able to put events to the specified event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission. You can find the StatementId by using DescribeEventBus.

" + "documentation":"

Revokes the permission of another Amazon Web Services account to be able to put events to the specified event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission. You can find the StatementId by using DescribeEventBus.

" }, "RemoveTargets":{ "name":"RemoveTargets", @@ -697,7 +697,7 @@ {"shape":"InternalException"}, {"shape":"ManagedRuleException"} ], - "documentation":"

Assigns one or more tags (key-value pairs) to the specified EventBridge resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In EventBridge, rules and event buses can be tagged.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.

" + "documentation":"

Assigns one or more tags (key-value pairs) to the specified EventBridge resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In EventBridge, rules and event buses can be tagged.

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

You can use the TagResource action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.

" }, "TestEventPattern":{ "name":"TestEventPattern", @@ -711,7 +711,7 @@ {"shape":"InvalidEventPatternException"}, {"shape":"InternalException"} ], - "documentation":"

Tests whether the specified event pattern matches the provided event.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

" + "documentation":"

Tests whether the specified event pattern matches the provided event.

Most services in Amazon Web Services treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

" }, "UntagResource":{ "name":"UntagResource", @@ -1008,7 +1008,7 @@ "documentation":"

The size of the array, if this is an array batch job. Valid values are integers between 2 and 10,000.

" } }, - "documentation":"

The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is an AWS Batch job.

" + "documentation":"

The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is a Batch job.

" }, "BatchParameters":{ "type":"structure", @@ -1019,22 +1019,22 @@ "members":{ "JobDefinition":{ "shape":"String", - "documentation":"

The ARN or name of the job definition to use if the event target is an AWS Batch job. This job definition must already exist.

" + "documentation":"

The ARN or name of the job definition to use if the event target is a Batch job. This job definition must already exist.

" }, "JobName":{ "shape":"String", - "documentation":"

The name to use for this execution of the job, if the target is an AWS Batch job.

" + "documentation":"

The name to use for this execution of the job, if the target is a Batch job.

" }, "ArrayProperties":{ "shape":"BatchArrayProperties", - "documentation":"

The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is an AWS Batch job.

" + "documentation":"

The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is a Batch job.

" }, "RetryStrategy":{ "shape":"BatchRetryStrategy", - "documentation":"

The retry strategy to use for failed jobs, if the target is an AWS Batch job. The retry strategy is the number of times to retry the failed job execution. Valid values are 1–10. When you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" + "documentation":"

The retry strategy to use for failed jobs, if the target is a Batch job. The retry strategy is the number of times to retry the failed job execution. Valid values are 1–10. When you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" } }, - "documentation":"

The custom parameters to be used when the target is an AWS Batch job.

" + "documentation":"

The custom parameters to be used when the target is a Batch job.

" }, "BatchRetryStrategy":{ "type":"structure", @@ -1044,7 +1044,7 @@ "documentation":"

The number of times to attempt to retry, if the job fails. Valid values are 1–10.

" } }, - "documentation":"

The retry strategy to use for failed jobs, if the target is an AWS Batch job. If you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" + "documentation":"

The retry strategy to use for failed jobs, if the target is a Batch job. If you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" }, "Boolean":{"type":"boolean"}, "CancelReplayRequest":{ @@ -1141,7 +1141,7 @@ "documentation":"

Specifies the value for the key. Currently, this must be the ID of the organization.

" } }, - "documentation":"

A JSON string which you can use to limit the event bus permissions you are granting to only accounts that fulfill the condition. Currently, the only supported condition is membership in a certain AWS organization. The string must contain Type, Key, and Value fields. The Value field specifies the ID of the AWS organization. Following is an example value for Condition:

'{\"Type\" : \"StringEquals\", \"Key\": \"aws:PrincipalOrgID\", \"Value\": \"o-1234567890\"}'

" + "documentation":"

A JSON string which you can use to limit the event bus permissions you are granting to only accounts that fulfill the condition. Currently, the only supported condition is membership in a certain Amazon Web Services organization. The string must contain Type, Key, and Value fields. The Value field specifies the ID of the Amazon Web Services organization. Following is an example value for Condition:

'{\"Type\" : \"StringEquals\", \"Key\": \"aws:PrincipalOrgID\", \"Value\": \"o-1234567890\"}'

" }, "Connection":{ "type":"structure", @@ -1468,7 +1468,7 @@ }, "EventSourceArn":{ "shape":"Arn", - "documentation":"

The ARN of the event source associated with the archive.

" + "documentation":"

The ARN of the event bus that sends events to the archive.

" }, "Description":{ "shape":"ArchiveDescription", @@ -1691,11 +1691,11 @@ "members":{ "Name":{ "shape":"EventSourceName", - "documentation":"

The name of the partner event source. This name must be unique and must be in the format partner_name/event_namespace/event_name . The AWS account that wants to use this partner event source must create a partner event bus with a name that matches the name of the partner event source.

" + "documentation":"

The name of the partner event source. This name must be unique and must be in the format partner_name/event_namespace/event_name . The Amazon Web Services account that wants to use this partner event source must create a partner event bus with a name that matches the name of the partner event source.

" }, "Account":{ "shape":"AccountId", - "documentation":"

The AWS account ID that is permitted to create a matching partner event bus for this partner event source.

" + "documentation":"

The Amazon Web Services account ID that is permitted to create a matching partner event bus for this partner event source.

" } } }, @@ -1866,7 +1866,7 @@ }, "Account":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the AWS customer that the event source was created for.

" + "documentation":"

The Amazon Web Services account ID of the Amazon Web Services customer that the event source was created for.

" } } }, @@ -1884,7 +1884,7 @@ }, "Force":{ "shape":"Boolean", - "documentation":"

If this is a managed rule, created by an AWS service on your behalf, you must specify Force as True to delete the rule. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.

" + "documentation":"

If this is a managed rule, created by an Amazon Web Services service on your behalf, you must specify Force as True to delete the rule. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.

" } } }, @@ -2259,7 +2259,7 @@ }, "ManagedBy":{ "shape":"ManagedBy", - "documentation":"

If this is a managed rule, created by an AWS service on your behalf, this field displays the principal name of the AWS service that created the rule.

" + "documentation":"

If this is a managed rule, created by an Amazon Web Services service on your behalf, this field displays the principal name of the Amazon Web Services service that created the rule.

" }, "EventBusName":{ "shape":"EventBusName", @@ -2299,15 +2299,15 @@ }, "LaunchType":{ "shape":"LaunchType", - "documentation":"

Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where AWS Fargate with Amazon ECS is supported. For more information, see AWS Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where Fargate with Amazon ECS is supported. For more information, see Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" }, "NetworkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

Use this structure if the ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks.

If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails.

" + "documentation":"

Use this structure if the Amazon ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks.

If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails.

" }, "PlatformVersion":{ "shape":"String", - "documentation":"

Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0.

This structure is used only if LaunchType is FARGATE. For more information about valid platform versions, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0.

This structure is used only if LaunchType is FARGATE. For more information about valid platform versions, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "Group":{ "shape":"String", @@ -2377,10 +2377,10 @@ }, "Policy":{ "shape":"String", - "documentation":"

The permissions policy of the event bus, describing which other AWS accounts can write events to this event bus.

" + "documentation":"

The permissions policy of the event bus, describing which other Amazon Web Services accounts can write events to this event bus.

" } }, - "documentation":"

An event bus receives events from a source and routes them to rules associated with that event bus. Your account's default event bus receives events from AWS services. A custom event bus can receive events from your custom applications and services. A partner event bus receives events from an event source created by an SaaS partner. These events come from the partners services or applications.

" + "documentation":"

An event bus receives events from a source and routes them to rules associated with that event bus. Your account's default event bus receives events from Amazon Web Services services. A custom event bus can receive events from your custom applications and services. A partner event bus receives events from an event source created by a SaaS partner. These events come from the partner's services or applications.

" }, "EventBusList":{ "type":"list", @@ -2422,7 +2422,7 @@ }, "ExpirationTime":{ "shape":"Timestamp", - "documentation":"

The date and time that the event source will expire, if the AWS account doesn't create a matching event bus for it.

" + "documentation":"

The date and time that the event source will expire, if the Amazon Web Services account doesn't create a matching event bus for it.

" }, "Name":{ "shape":"String", @@ -2433,7 +2433,7 @@ "documentation":"

The state of the event source. If it is ACTIVE, you have already created a matching event bus for this event source, and that event bus is active. If it is PENDING, either you haven't yet created a matching event bus, or that event bus is deactivated. If it is DELETED, you have created a matching event bus, but the event source has since been deleted.

" } }, - "documentation":"

A partner event source is created by an SaaS partner. If a customer creates a partner event bus that matches this event source, that AWS account can receive events from the partner's applications or services.

" + "documentation":"

A partner event source is created by a SaaS partner. If a customer creates a partner event bus that matches this event source, that Amazon Web Services account can receive events from the partner's applications or services.

" }, "EventSourceList":{ "type":"list", @@ -2512,7 +2512,7 @@ "members":{ "InputPathsMap":{ "shape":"TransformerPaths", - "documentation":"

Map of JSON paths to be extracted from the event. You can then insert these in the template in InputTemplate to produce the output you want to be sent to the target.

InputPathsMap is an array key-value pairs, where each value is a valid JSON path. You can have as many as 100 key-value pairs. You must use JSON dot notation, not bracket notation.

The keys cannot start with \"AWS.\"

" + "documentation":"

Map of JSON paths to be extracted from the event. You can then insert these in the template in InputTemplate to produce the output you want to be sent to the target.

InputPathsMap is an array key-value pairs, where each value is a valid JSON path. You can have as many as 100 key-value pairs. You must use JSON dot notation, not bracket notation.

The keys cannot start with \"AWS.\"

" }, "InputTemplate":{ "shape":"TransformerInput", @@ -2825,7 +2825,7 @@ }, "EventSourceArn":{ "shape":"Arn", - "documentation":"

The ARN of the event source associated with the replay.

" + "documentation":"

The ARN of the archive from which the events are replayed.

" }, "NextToken":{ "shape":"NextToken", @@ -2983,7 +2983,7 @@ "type":"structure", "members":{ }, - "documentation":"

This rule was created by an AWS service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

", + "documentation":"

This rule was created by an Amazon Web Services service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

", "exception":true }, "MaximumEventAgeInSeconds":{ @@ -3043,14 +3043,14 @@ "documentation":"

The name of the partner event source.

" } }, - "documentation":"

A partner event source is created by an SaaS partner. If a customer creates a partner event bus that matches this event source, that AWS account can receive events from the partner's applications or services.

" + "documentation":"

A partner event source is created by a SaaS partner. If a customer creates a partner event bus that matches this event source, that Amazon Web Services account can receive events from the partner's applications or services.

" }, "PartnerEventSourceAccount":{ "type":"structure", "members":{ "Account":{ "shape":"AccountId", - "documentation":"

The AWS account ID that the partner event source was offered to.

" + "documentation":"

The Amazon Web Services account ID that the partner event source was offered to.

" }, "CreationTime":{ "shape":"Timestamp", @@ -3058,14 +3058,14 @@ }, "ExpirationTime":{ "shape":"Timestamp", - "documentation":"

The date and time that the event source will expire, if the AWS account doesn't create a matching event bus for it.

" + "documentation":"

The date and time that the event source will expire, if the Amazon Web Services account doesn't create a matching event bus for it.

" }, "State":{ "shape":"EventSourceState", "documentation":"

The state of the event source. If it is ACTIVE, you have already created a matching event bus for this event source, and that event bus is active. If it is PENDING, either you haven't yet created a matching event bus, or that event bus is deactivated. If it is DELETED, you have created a matching event bus, but the event source has since been deleted.

" } }, - "documentation":"

The AWS account that a partner event source has been offered to.

" + "documentation":"

The Amazon Web Services account that a partner event source has been offered to.

" }, "PartnerEventSourceAccountList":{ "type":"list", @@ -3136,7 +3136,7 @@ "documentation":"

The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used.

" } }, - "documentation":"

The task placement strategy for a task or service. To learn more, see Task Placement Strategies in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The task placement strategy for a task or service. To learn more, see Task Placement Strategies in the Amazon Elastic Container Service Developer Guide.

" }, "PlacementStrategyField":{ "type":"string", @@ -3182,7 +3182,7 @@ "members":{ "Time":{ "shape":"EventTime", - "documentation":"

The time stamp of the event, per RFC3339. If no time stamp is provided, the time stamp of the PutEvents call is used.

" + "documentation":"

The time stamp of the event, per RFC3339. If no time stamp is provided, the time stamp of the PutEvents call is used.

" }, "Source":{ "shape":"String", @@ -3190,7 +3190,7 @@ }, "Resources":{ "shape":"EventResourceList", - "documentation":"

AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

" + "documentation":"

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

" }, "DetailType":{ "shape":"String", @@ -3206,7 +3206,7 @@ }, "TraceHeader":{ "shape":"TraceHeader", - "documentation":"

An AWS X-Ray trade header, which is an http header (X-Amzn-Trace-Id) that contains the trace-id associated with the event.

To learn more about X-Ray trace headers, see Tracing header in the AWS X-Ray Developer Guide.

" + "documentation":"

An X-Ray trace header, which is an HTTP header (X-Amzn-Trace-Id) that contains the trace-id associated with the event.

To learn more about X-Ray trace headers, see Tracing header in the X-Ray Developer Guide.

" } }, "documentation":"

Represents an event to be submitted.

" @@ -3271,11 +3271,11 @@ }, "Source":{ "shape":"EventSourceName", - "documentation":"

The event source that is generating the evntry.

" + "documentation":"

The event source that is generating the entry.

" }, "Resources":{ "shape":"EventResourceList", - "documentation":"

AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

" + "documentation":"

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

" }, "DetailType":{ "shape":"String", @@ -3338,19 +3338,19 @@ }, "Action":{ "shape":"Action", - "documentation":"

The action that you are enabling the other account to perform. Currently, this must be events:PutEvents.

" + "documentation":"

The action that you are enabling the other account to perform.

" }, "Principal":{ "shape":"Principal", - "documentation":"

The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify \"*\" to permit any account to put events to your default event bus.

If you specify \"*\" without specifying Condition, avoid creating rules that may match undesirable events. To create more secure rules, make sure that the event pattern for each rule contains an account field with a specific account ID from which to receive events. Rules with an account field do not match any events sent from other accounts.

" + "documentation":"

The 12-digit Amazon Web Services account ID that you are permitting to put events to your default event bus. Specify \"*\" to permit any account to put events to your default event bus.

If you specify \"*\" without specifying Condition, avoid creating rules that may match undesirable events. To create more secure rules, make sure that the event pattern for each rule contains an account field with a specific account ID from which to receive events. Rules with an account field do not match any events sent from other accounts.

" }, "StatementId":{ "shape":"StatementId", - "documentation":"

An identifier string for the external account that you are granting permissions to. If you later want to revoke the permission for this external account, specify this StatementId when you run RemovePermission.

" + "documentation":"

An identifier string for the external account that you are granting permissions to. If you later want to revoke the permission for this external account, specify this StatementId when you run RemovePermission.

" }, "Condition":{ "shape":"Condition", - "documentation":"

This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain AWS organization. For more information about AWS Organizations, see What Is AWS Organizations in the AWS Organizations User Guide.

If you specify Condition with an AWS organization ID, and specify \"*\" as the value for Principal, you grant permission to all the accounts in the named organization.

The Condition is a JSON string which must contain Type, Key, and Value fields.

" + "documentation":"

This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain Amazon Web Services organization. For more information about Amazon Web Services Organizations, see What Is Amazon Web Services Organizations in the Amazon Web Services Organizations User Guide.

If you specify Condition with an Amazon Web Services organization ID, and specify \"*\" as the value for Principal, you grant permission to all the accounts in the named organization.

The Condition is a JSON string which must contain Type, Key, and Value fields.

" }, "Policy":{ "shape":"String", @@ -3384,7 +3384,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role associated with the rule.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role associated with the rule.

If you're setting an event bus in another account as the target and that account granted permission to your account through an organization instead of directly by the account ID, you must specify a RoleArn with proper permissions in the Target structure, instead of here in this parameter.

" }, "Tags":{ "shape":"TagList", @@ -3485,7 +3485,7 @@ "members":{ "SecretManagerArn":{ "shape":"RedshiftSecretManagerArn", - "documentation":"

The name or ARN of the secret that enables access to the database. Required when authenticating using AWS Secrets Manager.

" + "documentation":"

The name or ARN of the secret that enables access to the database. Required when authenticating using Amazon Web Services Secrets Manager.

" }, "Database":{ "shape":"Database", @@ -3508,7 +3508,7 @@ "documentation":"

Indicates whether to send an event back to EventBridge after the SQL statement runs.

" } }, - "documentation":"

These are custom parameters to be used when the target is a Redshift cluster to invoke the Redshift Data API ExecuteStatement based on EventBridge events.

" + "documentation":"

These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API ExecuteStatement based on EventBridge events.

" }, "RedshiftSecretManagerArn":{ "type":"string", @@ -3558,7 +3558,7 @@ }, "Force":{ "shape":"Boolean", - "documentation":"

If this is a managed rule, created by an AWS service on your behalf, you must specify Force as True to remove targets. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.

" + "documentation":"

If this is a managed rule, created by an Amazon Web Services service on your behalf, you must specify Force as True to remove targets. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule or ListRules and checking the ManagedBy field of the response.

" } } }, @@ -3762,15 +3762,15 @@ }, "ScheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"

The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

" + "documentation":"

The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\". For more information, see Creating an Amazon EventBridge rule that runs on a schedule.

" }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the role that is used for target invocation.

" + "documentation":"

The Amazon Resource Name (ARN) of the role that is used for target invocation.

If you're setting an event bus in another account as the target and that account granted permission to your account through an organization instead of directly by the account ID, you must specify a RoleArn with proper permissions in the Target structure, instead of here in this parameter.

" }, "ManagedBy":{ "shape":"ManagedBy", - "documentation":"

If the rule was created on behalf of your account by an AWS service, this field displays the principal name of the service that created the rule.

" + "documentation":"

If the rule was created on behalf of your account by an Amazon Web Services service, this field displays the principal name of the service that created the rule.

" }, "EventBusName":{ "shape":"EventBusName", @@ -4019,7 +4019,7 @@ "documentation":"

The value for the specified tag key.

" } }, - "documentation":"

A key-value pair associated with an AWS resource. In EventBridge, rules and event buses support tagging.

" + "documentation":"

A key-value pair associated with an Amazon Web Services resource. In EventBridge, rules and event buses support tagging.

" }, "TagKey":{ "type":"string", @@ -4070,7 +4070,7 @@ "members":{ "Id":{ "shape":"TargetId", - "documentation":"

The ID of the target.

" + "documentation":"

The ID of the target. We recommend using a memorable and unique string.

" }, "Arn":{ "shape":"TargetArn", @@ -4106,7 +4106,7 @@ }, "BatchParameters":{ "shape":"BatchParameters", - "documentation":"

If the event target is an AWS Batch job, this contains the job definition, job name, and other parameters. For more information, see Jobs in the AWS Batch User Guide.

" + "documentation":"

If the event target is a Batch job, this contains the job definition, job name, and other parameters. For more information, see Jobs in the Batch User Guide.

" }, "SqsParameters":{ "shape":"SqsParameters", @@ -4118,7 +4118,7 @@ }, "RedshiftDataParameters":{ "shape":"RedshiftDataParameters", - "documentation":"

Contains the Redshift Data API parameters to use when the target is a Redshift cluster.

If you specify a Redshift Cluster as a Target, you can use this to specify parameters to invoke the Redshift Data API ExecuteStatement based on EventBridge events.

" + "documentation":"

Contains the Amazon Redshift Data API parameters to use when the target is an Amazon Redshift cluster.

If you specify an Amazon Redshift Cluster as a Target, you can use this to specify parameters to invoke the Amazon Redshift Data API ExecuteStatement based on EventBridge events.

" }, "SageMakerPipelineParameters":{ "shape":"SageMakerPipelineParameters", @@ -4133,7 +4133,7 @@ "documentation":"

The RetryPolicy object that contains the retry policy configuration to use for the dead-letter queue.

" } }, - "documentation":"

Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

" + "documentation":"

Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

" }, "TargetArn":{ "type":"string", @@ -4183,7 +4183,7 @@ }, "Event":{ "shape":"String", - "documentation":"

The event, in JSON format, to test against the event pattern. The JSON must follow the format specified in AWS Events, and the following fields are mandatory:

  • id

  • account

  • source

  • time

  • region

  • resources

  • detail-type

" + "documentation":"

The event, in JSON format, to test against the event pattern. The JSON must follow the format specified in Amazon Web Services Events, and the following fields are mandatory:

  • id

  • account

  • source

  • time

  • region

  • resources

  • detail-type

" } } }, @@ -4463,5 +4463,5 @@ } } }, - "documentation":"

Amazon EventBridge helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to:

  • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that Amazon EC2 instance enters the running state.

  • Direct specific API records from AWS CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks.

  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

For more information about the features of Amazon EventBridge, see the Amazon EventBridge User Guide.

" + "documentation":"

Amazon EventBridge helps you to respond to state changes in your Amazon Web Services resources. When your resources change state, they automatically send events to an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to:

  • Automatically invoke a Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state.

  • Direct specific API records from CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks.

  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

For more information about the features of Amazon EventBridge, see the Amazon EventBridge User Guide.

" } diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index 733549b7c570..ef310b018040 100644 --- a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index 85641bbbf6b1..60d7a7eba909 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index be11bbf934fb..f13fab238527 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/fis/pom.xml b/services/fis/pom.xml index 89a4100f979e..9fe3f7ceda53 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fms/pom.xml b/services/fms/pom.xml index 7daaf10e4857..17d03789b4a2 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index b4f83e220d4c..dac18426501c 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 1e4fd87f4abe..f89041706002 100644 --- a/services/forecastquery/pom.xml +++ 
b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 0b0cd1468e24..e261d9094b9a 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index 4c3e3d303042..b29d34a6bd49 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 4118191f47c1..6fa48ad66fc8 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index dfdba3749603..dc69f23f00ba 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index cd9b7e6a939c..e7008bcf1766 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/glue/pom.xml b/services/glue/pom.xml index 8f4b7890d5da..37ccb61cf1ee 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 
2.17.16-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 8fa72d85b88b..f4d887f3babd 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -310,7 +310,8 @@ {"shape":"ResourceNumberLimitExceededException"}, {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"}, - {"shape":"GlueEncryptionException"} + {"shape":"GlueEncryptionException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

Creates a new database in a Data Catalog.

" }, @@ -492,7 +493,8 @@ {"shape":"ResourceNumberLimitExceededException"}, {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"}, - {"shape":"GlueEncryptionException"} + {"shape":"GlueEncryptionException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

Creates a new table definition in the Data Catalog.

" }, @@ -643,7 +645,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

Removes a specified database from a Data Catalog.

After completing this operation, you no longer have access to the tables (and all table versions and partitions that might belong to the tables) and the user-defined functions in the deleted database. Glue deletes these \"orphaned\" resources asynchronously in a timely manner, at the discretion of the service.

To ensure the immediate deletion of all related resources, before calling DeleteDatabase, use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition or BatchDeletePartition, DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, to delete any resources that belong to the database.

" }, @@ -821,7 +824,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

Removes a table definition from the Data Catalog.

After completing this operation, you no longer have access to the table versions and partitions that belong to the deleted table. Glue deletes these \"orphaned\" resources asynchronously in a timely manner, at the discretion of the service.

To ensure the immediate deletion of all related resources, before calling DeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

" }, @@ -2400,7 +2404,8 @@ {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"}, - {"shape":"GlueEncryptionException"} + {"shape":"GlueEncryptionException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

Updates an existing database definition in a Data Catalog.

" }, @@ -3016,6 +3021,11 @@ } } }, + "BatchSize":{ + "type":"integer", + "max":100, + "min":1 + }, "BatchStopJobRunError":{ "type":"structure", "members":{ @@ -3169,6 +3179,12 @@ } } }, + "BatchWindow":{ + "type":"integer", + "box":true, + "max":900, + "min":1 + }, "BinaryColumnStatisticsData":{ "type":"structure", "required":[ @@ -3881,7 +3897,7 @@ }, "ConnectionType":{ "shape":"ConnectionType", - "documentation":"

The type of the connection. Currently, these types are supported:

  • JDBC - Designates a connection to a database through Java Database Connectivity (JDBC).

  • KAFKA - Designates a connection to an Apache Kafka streaming platform.

  • MONGODB - Designates a connection to a MongoDB document database.

  • NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).

  • MARKETPLACE - Uses configuration settings contained in a connector purchased from Marketplace to read from and write to data stores that are not natively supported by Glue.

  • CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.

SFTP is not supported.

" + "documentation":"

The type of the connection. Currently, these types are supported:

  • JDBC - Designates a connection to a database through Java Database Connectivity (JDBC).

  • KAFKA - Designates a connection to an Apache Kafka streaming platform.

  • MONGODB - Designates a connection to a MongoDB document database.

  • NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).

  • MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.

  • CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.

SFTP is not supported.

" }, "MatchCriteria":{ "shape":"MatchCriteria", @@ -5116,7 +5132,8 @@ "Tags":{ "shape":"TagsMap", "documentation":"

The tags to use with this trigger. You may use tags to limit access to the trigger. For more information about tags in Glue, see Amazon Web Services Tags in Glue in the developer guide.

" - } + }, + "EventBatchingCondition":{"shape":"EventBatchingCondition"} } }, "CreateTriggerResponse":{ @@ -6198,7 +6215,7 @@ "documentation":"

The unique identifier of the node within the workflow where the edge ends.

" } }, - "documentation":"

An edge represents a directed connection between two Glue components that are part of the workflow the edge belongs to.

" + "documentation":"

An edge represents a directed connection between two components on a workflow graph.

" }, "EdgeList":{ "type":"list", @@ -6306,6 +6323,21 @@ }, "documentation":"

Evaluation metrics provide an estimate of the quality of your machine learning transform.

" }, + "EventBatchingCondition":{ + "type":"structure", + "required":["BatchSize"], + "members":{ + "BatchSize":{ + "shape":"BatchSize", + "documentation":"

Number of events that must be received from Amazon EventBridge before EventBridge event trigger fires.

" + }, + "BatchWindow":{ + "shape":"BatchWindow", + "documentation":"

Window of time in seconds after which EventBridge event trigger fires. Window starts when first event is received.

" + } + }, + "documentation":"

Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires.

" + }, "ExecutionProperty":{ "type":"structure", "members":{ @@ -9569,7 +9601,7 @@ "documentation":"

Details of the crawler when the node represents a crawler.

" } }, - "documentation":"

A node represents an Glue component such as a trigger, or job, etc., that is part of a workflow.

" + "documentation":"

A node represents a Glue component (trigger, crawler, or job) on a workflow graph.

" }, "NodeIdList":{ "type":"list", @@ -10041,7 +10073,7 @@ }, "EnableHybrid":{ "shape":"EnableHybridValues", - "documentation":"

If 'TRUE', indicates that you are using both methods to grant cross-account access to Data Catalog resources:

  • By directly updating the resource policy with PutResourePolicy

  • By using the Grant permissions command on the Management Console.

Must be set to 'TRUE' if you have already used the Management Console to grant cross-account access, otherwise the call fails. Default is 'FALSE'.

" + "documentation":"

If 'TRUE', indicates that you are using both methods to grant cross-account access to Data Catalog resources:

  • By directly updating the resource policy with PutResourcePolicy

  • By using the Grant permissions command on the Amazon Web Services Management Console.

Must be set to 'TRUE' if you have already used the Management Console to grant cross-account access, otherwise the call fails. Default is 'FALSE'.

" } } }, @@ -11201,6 +11233,20 @@ } } }, + "StartingEventBatchCondition":{ + "type":"structure", + "members":{ + "BatchSize":{ + "shape":"NullableInteger", + "documentation":"

Number of events in the batch.

" + }, + "BatchWindow":{ + "shape":"NullableInteger", + "documentation":"

Duration of the batch window in seconds.

" + } + }, + "documentation":"

The batch condition that started the workflow run. Either the number of events in the batch reached the specified batch size, in which case the BatchSize member is non-zero, or the batch window expired, in which case the BatchWindow member is non-zero.

" + }, "StopCrawlerRequest":{ "type":"structure", "required":["Name"], @@ -11956,6 +12002,10 @@ "Predicate":{ "shape":"Predicate", "documentation":"

The predicate of this trigger, which defines when it will fire.

" + }, + "EventBatchingCondition":{ + "shape":"EventBatchingCondition", + "documentation":"

Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires.

" } }, "documentation":"

Information about a specific trigger.

" @@ -11996,7 +12046,8 @@ "enum":[ "SCHEDULED", "CONDITIONAL", - "ON_DEMAND" + "ON_DEMAND", + "EVENT" ] }, "TriggerUpdate":{ @@ -12021,6 +12072,10 @@ "Predicate":{ "shape":"Predicate", "documentation":"

The predicate of this trigger, which defines when it will fire.

" + }, + "EventBatchingCondition":{ + "shape":"EventBatchingCondition", + "documentation":"

Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires.

" } }, "documentation":"

A structure used to provide information used to update a trigger. This object updates the previous trigger definition by overwriting it completely.

" @@ -12885,7 +12940,7 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

The name of the workflow representing the flow.

" + "documentation":"

The name of the workflow.

" }, "Description":{ "shape":"GenericString", @@ -12893,7 +12948,7 @@ }, "DefaultRunProperties":{ "shape":"WorkflowRunProperties", - "documentation":"

A collection of properties to be used as part of each execution of the workflow.

" + "documentation":"

A collection of properties to be used as part of each execution of the workflow. The run properties are made available to each job in the workflow. A job can modify the properties for the next jobs in the flow.

" }, "CreatedOn":{ "shape":"TimestampValue", @@ -12916,7 +12971,7 @@ "documentation":"

You can use this parameter to prevent unwanted multiple updates to data, to control costs, or in some cases, to prevent exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs.

" } }, - "documentation":"

A workflow represents a flow in which Glue components should be run to complete a logical task.

" + "documentation":"

A workflow is a collection of multiple dependent Glue jobs and crawlers that are run to complete a complex ETL task. A workflow manages the execution and monitoring of all its jobs and crawlers.

" }, "WorkflowGraph":{ "type":"structure", @@ -12980,6 +13035,10 @@ "Graph":{ "shape":"WorkflowGraph", "documentation":"

The graph representing all the Glue components that belong to the workflow as nodes and directed connections between them as edges.

" + }, + "StartingEventBatchCondition":{ + "shape":"StartingEventBatchCondition", + "documentation":"

The batch condition that started the workflow run.

" } }, "documentation":"

A workflow run is an execution of a workflow providing all the runtime information.

" diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 48f280394760..b40559fb5641 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index 9248b51fd44e..c111301cb781 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/greengrassv2/src/main/resources/codegen-resources/service-2.json b/services/greengrassv2/src/main/resources/codegen-resources/service-2.json index 48a70a2cadd8..e90628d9c04f 100644 --- a/services/greengrassv2/src/main/resources/codegen-resources/service-2.json +++ b/services/greengrassv2/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Associate a list of client devices with a core device. Use this API operation to specify which client devices can discover a core device through cloud discovery. With cloud discovery, client devices connect to AWS IoT Greengrass to retrieve associated core devices' connectivity information and certificates. For more information, see Configure cloud discovery in the AWS IoT Greengrass V2 Developer Guide.

Client devices are local IoT devices that connect to and communicate with an AWS IoT Greengrass core device over MQTT. You can connect client devices to a core device to sync MQTT messages and data to AWS IoT Core and interact with client devices in AWS IoT Greengrass components. For more information, see Interact with local IoT devices in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

Associate a list of client devices with a core device. Use this API operation to specify which client devices can discover a core device through cloud discovery. With cloud discovery, client devices connect to IoT Greengrass to retrieve associated core devices' connectivity information and certificates. For more information, see Configure cloud discovery in the IoT Greengrass V2 Developer Guide.

Client devices are local IoT devices that connect to and communicate with an IoT Greengrass core device over MQTT. You can connect client devices to a core device to sync MQTT messages and data to Amazon Web Services IoT Core and interact with client devices in Greengrass components. For more information, see Interact with local IoT devices in the IoT Greengrass V2 Developer Guide.

" }, "BatchDisassociateClientDeviceFromCoreDevice":{ "name":"BatchDisassociateClientDeviceFromCoreDevice", @@ -81,9 +81,10 @@ {"shape":"AccessDeniedException"}, {"shape":"ConflictException"}, {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"RequestAlreadyInProgressException"} ], - "documentation":"

Creates a component. Components are software that run on AWS IoT Greengrass core devices. After you develop and test a component on your core device, you can use this operation to upload your component to AWS IoT Greengrass. Then, you can deploy the component to other core devices.

You can use this operation to do the following:

  • Create components from recipes

    Create a component from a recipe, which is a file that defines the component's metadata, parameters, dependencies, lifecycle, artifacts, and platform capability. For more information, see AWS IoT Greengrass component recipe reference in the AWS IoT Greengrass V2 Developer Guide.

    To create a component from a recipe, specify inlineRecipe when you call this operation.

  • Create components from Lambda functions

    Create a component from an AWS Lambda function that runs on AWS IoT Greengrass. This creates a recipe and artifacts from the Lambda function's deployment package. You can use this operation to migrate Lambda functions from AWS IoT Greengrass V1 to AWS IoT Greengrass V2.

    This function only accepts Lambda functions that use the following runtimes:

    • Python 2.7 – python2.7

    • Python 3.7 – python3.7

    • Python 3.8 – python3.8

    • Java 8 – java8

    • Node.js 10 – nodejs10.x

    • Node.js 12 – nodejs12.x

    To create a component from a Lambda function, specify lambdaFunction when you call this operation.

" + "documentation":"

Creates a component. Components are software that run on Greengrass core devices. After you develop and test a component on your core device, you can use this operation to upload your component to IoT Greengrass. Then, you can deploy the component to other core devices.

You can use this operation to do the following:

  • Create components from recipes

    Create a component from a recipe, which is a file that defines the component's metadata, parameters, dependencies, lifecycle, artifacts, and platform capability. For more information, see IoT Greengrass component recipe reference in the IoT Greengrass V2 Developer Guide.

    To create a component from a recipe, specify inlineRecipe when you call this operation.

  • Create components from Lambda functions

    Create a component from a Lambda function that runs on IoT Greengrass. This creates a recipe and artifacts from the Lambda function's deployment package. You can use this operation to migrate Lambda functions from IoT Greengrass V1 to IoT Greengrass V2.

    This function only accepts Lambda functions that use the following runtimes:

    • Python 2.7 – python2.7

    • Python 3.7 – python3.7

    • Python 3.8 – python3.8

    • Java 8 – java8

    • Node.js 10 – nodejs10.x

    • Node.js 12 – nodejs12.x

    To create a component from a Lambda function, specify lambdaFunction when you call this operation.

" }, "CreateDeployment":{ "name":"CreateDeployment", @@ -99,9 +100,10 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"RequestAlreadyInProgressException"} ], - "documentation":"

Creates a continuous deployment for a target, which is a AWS IoT Greengrass core device or group of core devices. When you add a new core device to a group of core devices that has a deployment, AWS IoT Greengrass deploys that group's deployment to the new device.

You can define one deployment for each target. When you create a new deployment for a target that has an existing deployment, you replace the previous deployment. AWS IoT Greengrass applies the new deployment to the target devices.

Every deployment has a revision number that indicates how many deployment revisions you define for a target. Use this operation to create a new revision of an existing deployment. This operation returns the revision number of the new deployment when you create it.

For more information, see the Create deployments in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

Creates a continuous deployment for a target, which is a Greengrass core device or group of core devices. When you add a new core device to a group of core devices that has a deployment, IoT Greengrass deploys that group's deployment to the new device.

You can define one deployment for each target. When you create a new deployment for a target that has an existing deployment, you replace the previous deployment. IoT Greengrass applies the new deployment to the target devices.

Every deployment has a revision number that indicates how many deployment revisions you define for a target. Use this operation to create a new revision of an existing deployment. This operation returns the revision number of the new deployment when you create it.

For more information, see Create deployments in the IoT Greengrass V2 Developer Guide.

" }, "DeleteComponent":{ "name":"DeleteComponent", @@ -119,7 +121,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes a version of a component from AWS IoT Greengrass.

This operation deletes the component's recipe and artifacts. As a result, deployments that refer to this component version will fail. If you have deployments that use this component version, you can remove the component from the deployment or update the deployment to use a valid version.

" + "documentation":"

Deletes a version of a component from IoT Greengrass.

This operation deletes the component's recipe and artifacts. As a result, deployments that refer to this component version will fail. If you have deployments that use this component version, you can remove the component from the deployment or update the deployment to use a valid version.

" }, "DeleteCoreDevice":{ "name":"DeleteCoreDevice", @@ -137,7 +139,7 @@ {"shape":"ThrottlingException"}, {"shape":"ConflictException"} ], - "documentation":"

Deletes a AWS IoT Greengrass core device, which is an AWS IoT thing. This operation removes the core device from the list of core devices. This operation doesn't delete the AWS IoT thing. For more information about how to delete the AWS IoT thing, see DeleteThing in the AWS IoT API Reference.

" + "documentation":"

Deletes a Greengrass core device, which is an IoT thing. This operation removes the core device from the list of core devices. This operation doesn't delete the IoT thing. For more information about how to delete the IoT thing, see DeleteThing in the IoT API Reference.

" }, "DescribeComponent":{ "name":"DescribeComponent", @@ -205,7 +207,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves metadata for a AWS IoT Greengrass core device.

" + "documentation":"

Retrieves metadata for a Greengrass core device.

" }, "GetDeployment":{ "name":"GetDeployment", @@ -222,7 +224,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets a deployment. Deployments define the components that run on AWS IoT Greengrass core devices.

" + "documentation":"

Gets a deployment. Deployments define the components that run on Greengrass core devices.

" }, "ListClientDevicesAssociatedWithCoreDevice":{ "name":"ListClientDevicesAssociatedWithCoreDevice", @@ -289,7 +291,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves a paginated list of AWS IoT Greengrass core devices.

" + "documentation":"

Retrieves a paginated list of Greengrass core devices.

" }, "ListDeployments":{ "name":"ListDeployments", @@ -322,7 +324,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves a paginated list of deployment jobs that AWS IoT Greengrass sends to AWS IoT Greengrass core devices.

" + "documentation":"

Retrieves a paginated list of deployment jobs that IoT Greengrass sends to Greengrass core devices.

" }, "ListInstalledComponents":{ "name":"ListInstalledComponents", @@ -339,7 +341,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves a paginated list of the components that a AWS IoT Greengrass core device runs.

" + "documentation":"

Retrieves a paginated list of the components that a Greengrass core device runs.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -354,7 +356,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves the list of tags for an AWS IoT Greengrass resource.

" + "documentation":"

Retrieves the list of tags for an IoT Greengrass resource.

" }, "ResolveComponentCandidates":{ "name":"ResolveComponentCandidates", @@ -372,7 +374,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Retrieves a list of components that meet the component, version, and platform requirements of a deployment. AWS IoT Greengrass core devices call this operation when they receive a deployment to identify the components to install.

This operation identifies components that meet all dependency requirements for a deployment. If the requirements conflict, then this operation returns an error and the deployment fails. For example, this occurs if component A requires version >2.0.0 and component B requires version <2.0.0 of a component dependency.

When you specify the component candidates to resolve, AWS IoT Greengrass compares each component's digest from the core device with the component's digest in the AWS Cloud. If the digests don't match, then AWS IoT Greengrass specifies to use the version from the AWS Cloud.

To use this operation, you must use the data plane API endpoint and authenticate with an AWS IoT device certificate. For more information, see AWS IoT Greengrass endpoints and quotas.

" + "documentation":"

Retrieves a list of components that meet the component, version, and platform requirements of a deployment. Greengrass core devices call this operation when they receive a deployment to identify the components to install.

This operation identifies components that meet all dependency requirements for a deployment. If the requirements conflict, then this operation returns an error and the deployment fails. For example, this occurs if component A requires version >2.0.0 and component B requires version <2.0.0 of a component dependency.

When you specify the component candidates to resolve, IoT Greengrass compares each component's digest from the core device with the component's digest in the Amazon Web Services Cloud. If the digests don't match, then IoT Greengrass specifies to use the version from the Amazon Web Services Cloud.

To use this operation, you must use the data plane API endpoint and authenticate with an IoT device certificate. For more information, see IoT Greengrass endpoints and quotas.

" }, "TagResource":{ "name":"TagResource", @@ -387,7 +389,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds tags to an AWS IoT Greengrass resource. If a tag already exists for the resource, this operation updates the tag's value.

" + "documentation":"

Adds tags to an IoT Greengrass resource. If a tag already exists for the resource, this operation updates the tag's value.

" }, "UntagResource":{ "name":"UntagResource", @@ -402,7 +404,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes a tag from an AWS IoT Greengrass resource.

" + "documentation":"

Removes a tag from an IoT Greengrass resource.

" } }, "shapes":{ @@ -422,7 +424,7 @@ "members":{ "thingName":{ "shape":"IoTThingName", - "documentation":"

The name of the AWS IoT thing that represents the client device to associate.

" + "documentation":"

The name of the IoT thing that represents the client device to associate.

" } }, "documentation":"

Contains a request to associate a client device with a core device. The BatchAssociateClientDeviceWithCoreDevice operation consumes a list of these requests.

" @@ -438,7 +440,7 @@ "members":{ "thingName":{ "shape":"IoTThingName", - "documentation":"

The name of the AWS IoT thing whose associate request failed.

" + "documentation":"

The name of the IoT thing whose associate request failed.

" }, "code":{ "shape":"NonEmptyString", @@ -462,7 +464,7 @@ "members":{ "thingName":{ "shape":"IoTThingName", - "documentation":"

The name of the AWS IoT thing that represents the associated client device.

" + "documentation":"

The name of the IoT thing that represents the associated client device.

" }, "associationTimestamp":{ "shape":"Timestamp", @@ -487,7 +489,7 @@ }, "coreDeviceThingName":{ "shape":"IoTThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

", + "documentation":"

The name of the core device. This is also the name of the IoT thing.

", "location":"uri", "locationName":"coreDeviceThingName" } @@ -498,7 +500,7 @@ "members":{ "errorEntries":{ "shape":"AssociateClientDeviceWithCoreDeviceErrorList", - "documentation":"

The list of any errors for the entries in the request. Each error entry contains the name of the AWS IoT thing that failed to associate.

" + "documentation":"

The list of any errors for the entries in the request. Each error entry contains the name of the IoT thing that failed to associate.

" } } }, @@ -512,7 +514,7 @@ }, "coreDeviceThingName":{ "shape":"IoTThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

", + "documentation":"

The name of the core device. This is also the name of the IoT thing.

", "location":"uri", "locationName":"coreDeviceThingName" } @@ -523,10 +525,14 @@ "members":{ "errorEntries":{ "shape":"DisassociateClientDeviceFromCoreDeviceErrorList", - "documentation":"

The list of errors (if any) for the entries in the request. Each error entry contains the name of the AWS IoT thing that failed to disassociate.

" + "documentation":"

The list of errors (if any) for the entries in the request. Each error entry contains the name of the IoT thing that failed to disassociate.

" } } }, + "CPU":{ + "type":"double", + "min":0 + }, "CancelDeploymentRequest":{ "type":"structure", "required":["deploymentId"], @@ -548,6 +554,12 @@ } } }, + "ClientTokenString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, "CloudComponentState":{ "type":"string", "enum":[ @@ -571,10 +583,10 @@ }, "errors":{ "shape":"StringMap", - "documentation":"

A dictionary of errors that communicate why the component is in an error state. For example, if AWS IoT Greengrass can't access an artifact for the component, then errors contains the artifact's URI as a key, and the error message as the value for that key.

" + "documentation":"

A dictionary of errors that communicate why the component is in an error state. For example, if IoT Greengrass can't access an artifact for the component, then errors contains the artifact's URI as a key, and the error message as the value for that key.

" } }, - "documentation":"

Contains the status of a component in the AWS IoT Greengrass service.

" + "documentation":"

Contains the status of a component in the IoT Greengrass service.

" }, "Component":{ "type":"structure", @@ -611,10 +623,10 @@ }, "versionRequirements":{ "shape":"ComponentVersionRequirementMap", - "documentation":"

The version requirements for the component's dependencies. AWS IoT Greengrass core devices get the version requirements from component recipes.

AWS IoT Greengrass V2 uses semantic version constraints. For more information, see Semantic Versioning.

" + "documentation":"

The version requirements for the component's dependencies. Greengrass core devices get the version requirements from component recipes.

IoT Greengrass V2 uses semantic version constraints. For more information, see Semantic Versioning.

" } }, - "documentation":"

Contains information about a component that is a candidate to deploy to a AWS IoT Greengrass core device.

" + "documentation":"

Contains information about a component that is a candidate to deploy to a Greengrass core device.

" }, "ComponentCandidateList":{ "type":"list", @@ -639,14 +651,14 @@ "members":{ "merge":{ "shape":"ComponentConfigurationString", - "documentation":"

A serialized JSON string that contains the configuration object to merge to target devices. The core device merges this configuration with the component's existing configuration. If this is the first time a component deploys on a device, the core device merges this configuration with the component's default configuration. This means that the core device keeps it's existing configuration for keys and values that you don't specify in this object. For more information, see Merge configuration updates in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A serialized JSON string that contains the configuration object to merge to target devices. The core device merges this configuration with the component's existing configuration. If this is the first time a component deploys on a device, the core device merges this configuration with the component's default configuration. This means that the core device keeps its existing configuration for keys and values that you don't specify in this object. For more information, see Merge configuration updates in the IoT Greengrass V2 Developer Guide.

" }, "reset":{ "shape":"ComponentConfigurationPathList", - "documentation":"

The list of configuration nodes to reset to default values on target devices. Use JSON pointers to specify each node to reset. JSON pointers start with a forward slash (/) and use forward slashes to separate the key for each level in the object. For more information, see the JSON pointer specification and Reset configuration updates in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

The list of configuration nodes to reset to default values on target devices. Use JSON pointers to specify each node to reset. JSON pointers start with a forward slash (/) and use forward slashes to separate the key for each level in the object. For more information, see the JSON pointer specification and Reset configuration updates in the IoT Greengrass V2 Developer Guide.

" } }, - "documentation":"

Contains information about a deployment's update to a component's configuration on Greengrass core devices. For more information, see Update component configurations in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

Contains information about a deployment's update to a component's configuration on Greengrass core devices. For more information, see Update component configurations in the IoT Greengrass V2 Developer Guide.

" }, "ComponentDependencyMap":{ "type":"map", @@ -658,7 +670,7 @@ "members":{ "versionRequirement":{ "shape":"NonEmptyString", - "documentation":"

The component version requirement for the component dependency.

AWS IoT Greengrass V2 uses semantic version constraints. For more information, see Semantic Versioning.

" + "documentation":"

The component version requirement for the component dependency.

IoT Greengrass V2 uses semantic version constraints. For more information, see Semantic Versioning.

" }, "dependencyType":{ "shape":"ComponentDependencyType", @@ -683,11 +695,11 @@ }, "configurationUpdate":{ "shape":"ComponentConfigurationUpdate", - "documentation":"

The configuration updates to deploy for the component. You can define reset updates and merge updates. A reset updates the keys that you specify to the default configuration for the component. A merge updates the core device's component configuration with the keys and values that you specify. The AWS IoT Greengrass Core software applies reset updates before it applies merge updates. For more information, see Update component configurations in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

The configuration updates to deploy for the component. You can define reset updates and merge updates. A reset updates the keys that you specify to the default configuration for the component. A merge updates the core device's component configuration with the keys and values that you specify. The IoT Greengrass Core software applies reset updates before it applies merge updates. For more information, see Update component configurations in the IoT Greengrass V2 Developer Guide.

" }, "runWith":{ "shape":"ComponentRunWith", - "documentation":"

The system user and group that the AWS IoT Greengrass Core software uses to run component processes on the core device. If you omit this parameter, the AWS IoT Greengrass Core software uses the system user and group that you configure for the core device. For more information, see Configure the user and group that run components in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

The system user and group that the IoT Greengrass Core software uses to run component processes on the core device. If you omit this parameter, the IoT Greengrass Core software uses the system user and group that you configure for the core device. For more information, see Configure the user and group that run components in the IoT Greengrass V2 Developer Guide.

" } }, "documentation":"

Contains information about a component to deploy.

" @@ -742,11 +754,11 @@ "members":{ "name":{ "shape":"NonEmptyString", - "documentation":"

The friendly name of the platform. This name helps you identify the platform.

If you omit this parameter, AWS IoT Greengrass creates a friendly name from the os and architecture of the platform.

" + "documentation":"

The friendly name of the platform. This name helps you identify the platform.

If you omit this parameter, IoT Greengrass creates a friendly name from the os and architecture of the platform.

" }, "attributes":{ "shape":"PlatformAttributesMap", - "documentation":"

A dictionary of attributes for the platform. The AWS IoT Greengrass Core software defines the os and platform by default. You can specify additional platform attributes for a core device when you deploy the AWS IoT Greengrass nucleus component. For more information, see the AWS IoT Greengrass nucleus component in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A dictionary of attributes for the platform. The IoT Greengrass Core software defines the os and platform by default. You can specify additional platform attributes for a core device when you deploy the Greengrass nucleus component. For more information, see the Greengrass nucleus component in the IoT Greengrass V2 Developer Guide.

" } }, "documentation":"

Contains information about a platform that a component supports.

" @@ -760,10 +772,14 @@ "members":{ "posixUser":{ "shape":"NonEmptyString", - "documentation":"

The POSIX system user and (optional) group to use to run this component. Specify the user and group separated by a colon (:) in the following format: user:group. The group is optional. If you don't specify a group, the AWS IoT Greengrass Core software uses the primary user for the group.

" + "documentation":"

The POSIX system user and (optional) group to use to run this component. Specify the user and group separated by a colon (:) in the following format: user:group. The group is optional. If you don't specify a group, the IoT Greengrass Core software uses the primary user for the group.

If you omit this parameter, the IoT Greengrass Core software uses the default system user and group that you configure on the Greengrass nucleus component. For more information, see Configure the user and group that run components.

" + }, + "systemResourceLimits":{ + "shape":"SystemResourceLimits", + "documentation":"

The system resource limits to apply to this component's process on the core device.

If you omit this parameter, the IoT Greengrass Core software uses the default system resource limits that you configure on the Greengrass nucleus component. For more information, see Configure system resource limits for components.

" } }, - "documentation":"

Contains information system user and group that the AWS IoT Greengrass Core software uses to run component processes on the core device. For more information, see Configure the user and group that run components in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

Contains information about the system user and group that the IoT Greengrass Core software uses to run component processes on the core device. For more information, see Configure the user and group that run components in the IoT Greengrass V2 Developer Guide.

" }, "ComponentVersionARN":{ "type":"string", @@ -836,18 +852,18 @@ "members":{ "coreDeviceThingName":{ "shape":"CoreDeviceThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

" + "documentation":"

The name of the core device. This is also the name of the IoT thing.

" }, "status":{ "shape":"CoreDeviceStatus", - "documentation":"

The status of the core device. Core devices can have the following statuses:

  • HEALTHY – The AWS IoT Greengrass Core software and all components run on the core device without issue.

  • UNHEALTHY – The AWS IoT Greengrass Core software or a component is in a failed state on the core device.

" + "documentation":"

The status of the core device. Core devices can have the following statuses:

  • HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue.

  • UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device.

" }, "lastStatusUpdateTimestamp":{ "shape":"Timestamp", "documentation":"

The time at which the core device's status last updated, expressed in ISO 8601 format.

" } }, - "documentation":"

Contains information about a AWS IoT Greengrass core device, which is an AWS IoT thing that runs the AWS IoT Greengrass Core software.

" + "documentation":"

Contains information about a Greengrass core device, which is an IoT thing that runs the IoT Greengrass Core software.

" }, "CoreDeviceArchitectureString":{ "type":"string", @@ -888,7 +904,12 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide.

" + }, + "clientToken":{ + "shape":"ClientTokenString", + "documentation":"

A unique, case-sensitive identifier that you can provide to ensure that the request is idempotent. Idempotency means that the request is successfully processed only once, even if you send the request multiple times. When a request succeeds, and you specify the same client token for subsequent successful requests, the IoT Greengrass V2 service returns the successful response that it caches from the previous request. IoT Greengrass V2 caches successful responses for idempotent requests for up to 8 hours.

", + "idempotencyToken":true } } }, @@ -919,7 +940,7 @@ }, "status":{ "shape":"CloudComponentStatus", - "documentation":"

The status of the component version in AWS IoT Greengrass V2. This status is different from the status of the component on a core device.

" + "documentation":"

The status of the component version in IoT Greengrass V2. This status is different from the status of the component on a core device.

" } } }, @@ -929,11 +950,11 @@ "members":{ "targetArn":{ "shape":"TargetARN", - "documentation":"

The ARN of the target AWS IoT thing or thing group.

" + "documentation":"

The ARN of the target IoT thing or thing group.

" }, "deploymentName":{ "shape":"NonEmptyString", - "documentation":"

The name of the deployment.

You can create deployments without names. If you create a deployment without a name, the AWS IoT Greengrass V2 console shows the deployment name as <targetType>:<targetName>, where targetType and targetName are the type and name of the deployment target.

" + "documentation":"

The name of the deployment.

" }, "components":{ "shape":"ComponentDeploymentSpecifications", @@ -949,7 +970,12 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide.

" + }, + "clientToken":{ + "shape":"ClientTokenString", + "documentation":"

A unique, case-sensitive identifier that you can provide to ensure that the request is idempotent. Idempotency means that the request is successfully processed only once, even if you send the request multiple times. When a request succeeds, and you specify the same client token for subsequent successful requests, the IoT Greengrass V2 service returns the successful response that it caches from the previous request. IoT Greengrass V2 caches successful responses for idempotent requests for up to 8 hours.

", + "idempotencyToken":true } } }, @@ -962,11 +988,11 @@ }, "iotJobId":{ "shape":"NonEmptyString", - "documentation":"

The ID of the AWS IoT job that applies the deployment to target devices.

" + "documentation":"

The ID of the IoT job that applies the deployment to target devices.

" }, "iotJobArn":{ "shape":"IoTJobARN", - "documentation":"

The ARN of the AWS IoT job that applies the deployment to target devices.

" + "documentation":"

The ARN of the IoT job that applies the deployment to target devices.

" } } }, @@ -993,7 +1019,7 @@ "members":{ "coreDeviceThingName":{ "shape":"CoreDeviceThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

", + "documentation":"

The name of the core device. This is also the name of the IoT thing.

", "location":"uri", "locationName":"coreDeviceThingName" } @@ -1004,7 +1030,7 @@ "members":{ "targetArn":{ "shape":"TargetARN", - "documentation":"

The ARN of the target AWS IoT thing or thing group.

" + "documentation":"

The ARN of the target IoT thing or thing group.

" }, "revisionId":{ "shape":"NonEmptyString", @@ -1016,7 +1042,7 @@ }, "deploymentName":{ "shape":"NonEmptyString", - "documentation":"

The name of the deployment.

You can create deployments without names. If you create a deployment without a name, the AWS IoT Greengrass V2 console shows the deployment name as <targetType>:<targetName>, where targetType and targetName are the type and name of the deployment target.

" + "documentation":"

The name of the deployment.

" }, "creationTimestamp":{ "shape":"Timestamp", @@ -1043,7 +1069,7 @@ }, "action":{ "shape":"DeploymentComponentUpdatePolicyAction", - "documentation":"

Whether or not to notify components and wait for components to become safe to update. Choose from the following options:

  • NOTIFY_COMPONENTS – The deployment notifies each component before it stops and updates that component. Components can use the SubscribeToComponentUpdates IPC operation to receive these notifications. Then, components can respond with the DeferComponentUpdate IPC operation. For more information, see Create deployments in the AWS IoT Greengrass V2 Developer Guide.

  • SKIP_NOTIFY_COMPONENTS – The deployment doesn't notify components or wait for them to be safe to update.

Default: NOTIFY_COMPONENTS

" + "documentation":"

Whether or not to notify components and wait for components to become safe to update. Choose from the following options:

  • NOTIFY_COMPONENTS – The deployment notifies each component before it stops and updates that component. Components can use the SubscribeToComponentUpdates IPC operation to receive these notifications. Then, components can respond with the DeferComponentUpdate IPC operation. For more information, see Create deployments in the IoT Greengrass V2 Developer Guide.

  • SKIP_NOTIFY_COMPONENTS – The deployment doesn't notify components or wait for them to be safe to update.

Default: NOTIFY_COMPONENTS

" } }, "documentation":"

Contains information about a deployment's policy that defines when components are safe to update.

Each component on a device can report whether or not it's ready to update. After a component and its dependencies are ready, they can apply the update in the deployment. You can configure whether or not the deployment notifies components of an update and waits for a response. You specify the amount of time each component has to respond to the update notification.

" @@ -1064,7 +1090,7 @@ "box":true } }, - "documentation":"

Contains information about how long a component on a core device can validate its configuration updates before it times out. Components can use the SubscribeToValidateConfigurationUpdates IPC operation to receive notifications when a deployment specifies a configuration update. Then, components can respond with the SendConfigurationValidityReport IPC operation. For more information, see Create deployments in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

Contains information about how long a component on a core device can validate its configuration updates before it times out. Components can use the SubscribeToValidateConfigurationUpdates IPC operation to receive notifications when a deployment specifies a configuration update. Then, components can respond with the SendConfigurationValidityReport IPC operation. For more information, see Create deployments in the IoT Greengrass V2 Developer Guide.

" }, "DeploymentFailureHandlingPolicy":{ "type":"string", @@ -1097,7 +1123,7 @@ "documentation":"

The timeout configuration for the job. This configuration defines the amount of time each device has to complete the job.

" } }, - "documentation":"

Contains information about an AWS IoT job configuration.

" + "documentation":"

Contains information about an IoT job configuration.

" }, "DeploymentList":{ "type":"list", @@ -1173,7 +1199,7 @@ }, "status":{ "shape":"CloudComponentStatus", - "documentation":"

The status of the component version in AWS IoT Greengrass V2. This status is different from the status of the component on a core device.

" + "documentation":"

The status of the component version in IoT Greengrass V2. This status is different from the status of the component on a core device.

" }, "platforms":{ "shape":"ComponentPlatformList", @@ -1181,7 +1207,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide.

" } } }, @@ -1197,7 +1223,7 @@ "members":{ "thingName":{ "shape":"IoTThingName", - "documentation":"

The name of the AWS IoT thing that represents the client device to disassociate.

" + "documentation":"

The name of the IoT thing that represents the client device to disassociate.

" } }, "documentation":"

Contains a request to disassociate a client device from a core device. The BatchDisassociateClientDeviceWithCoreDevice operation consumes a list of these requests.

" @@ -1213,7 +1239,7 @@ "members":{ "thingName":{ "shape":"IoTThingName", - "documentation":"

The name of the AWS IoT thing whose disassociate request failed.

" + "documentation":"

The name of the IoT thing whose disassociate request failed.

" }, "code":{ "shape":"NonEmptyString", @@ -1249,15 +1275,15 @@ }, "deploymentName":{ "shape":"DeploymentName", - "documentation":"

The name of the deployment.

You can create deployments without names. If you create a deployment without a name, the AWS IoT Greengrass V2 console shows the deployment name as <targetType>:<targetName>, where targetType and targetName are the type and name of the deployment target.

" + "documentation":"

The name of the deployment.

" }, "iotJobId":{ "shape":"IoTJobId", - "documentation":"

The ID of the AWS IoT job that applies the deployment to target devices.

" + "documentation":"

The ID of the IoT job that applies the deployment to target devices.

" }, "iotJobArn":{ "shape":"IoTJobARN", - "documentation":"

The ARN of the AWS IoT job that applies the deployment to target devices.

" + "documentation":"

The ARN of the IoT job that applies the deployment to target devices.

" }, "description":{ "shape":"Description", @@ -1265,11 +1291,11 @@ }, "targetArn":{ "shape":"TargetARN", - "documentation":"

The ARN of the target AWS IoT thing or thing group.

" + "documentation":"

The ARN of the target IoT thing or thing group.

" }, "coreDeviceExecutionStatus":{ "shape":"EffectiveDeploymentExecutionStatus", - "documentation":"

The status of the deployment job on the AWS IoT Greengrass core device.

" + "documentation":"

The status of the deployment job on the Greengrass core device.

" }, "reason":{ "shape":"Reason", @@ -1284,7 +1310,7 @@ "documentation":"

The time at which the deployment job was last modified, expressed in ISO 8601 format.

" } }, - "documentation":"

Contains information about a deployment job that AWS IoT Greengrass sends to a AWS IoT Greengrass core device.

" + "documentation":"

Contains information about a deployment job that IoT Greengrass sends to a Greengrass core device.

" }, "EffectiveDeploymentExecutionStatus":{ "type":"string", @@ -1347,7 +1373,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide.

" } } }, @@ -1388,7 +1414,7 @@ "members":{ "coreDeviceThingName":{ "shape":"CoreDeviceThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

", + "documentation":"

The name of the core device. This is also the name of the IoT thing.

", "location":"uri", "locationName":"coreDeviceThingName" } @@ -1399,11 +1425,11 @@ "members":{ "coreDeviceThingName":{ "shape":"CoreDeviceThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

" + "documentation":"

The name of the core device. This is also the name of the IoT thing.

" }, "coreVersion":{ "shape":"GGCVersion", - "documentation":"

The version of the AWS IoT Greengrass Core software that the core device runs. This version is equivalent to the version of the AWS IoT Greengrass nucleus component that runs on the core device. For more information, see the AWS IoT Greengrass nucleus component in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

The version of the IoT Greengrass Core software that the core device runs. This version is equivalent to the version of the Greengrass nucleus component that runs on the core device. For more information, see the Greengrass nucleus component in the IoT Greengrass V2 Developer Guide.

" }, "platform":{ "shape":"CoreDevicePlatformString", @@ -1415,7 +1441,7 @@ }, "status":{ "shape":"CoreDeviceStatus", - "documentation":"

The status of the core device. The core device status can be:

  • HEALTHY – The AWS IoT Greengrass Core software and all components run on the core device without issue.

  • UNHEALTHY – The AWS IoT Greengrass Core software or a component is in a failed state on the core device.

" + "documentation":"

The status of the core device. The core device status can be:

  • HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue.

  • UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device.

" }, "lastStatusUpdateTimestamp":{ "shape":"Timestamp", @@ -1423,7 +1449,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide.

" } } }, @@ -1444,7 +1470,7 @@ "members":{ "targetArn":{ "shape":"TargetARN", - "documentation":"

The ARN of the target AWS IoT thing or thing group.

" + "documentation":"

The ARN of the target IoT thing or thing group.

" }, "revisionId":{ "shape":"NonEmptyString", @@ -1456,7 +1482,7 @@ }, "deploymentName":{ "shape":"NullableString", - "documentation":"

The name of the deployment.

You can create deployments without names. If you create a deployment without a name, the AWS IoT Greengrass V2 console shows the deployment name as <targetType>:<targetName>, where targetType and targetName are the type and name of the deployment target.

" + "documentation":"

The name of the deployment.

" }, "deploymentStatus":{ "shape":"DeploymentStatus", @@ -1464,11 +1490,11 @@ }, "iotJobId":{ "shape":"NullableString", - "documentation":"

The ID of the AWS IoT job that applies the deployment to target devices.

" + "documentation":"

The ID of the IoT job that applies the deployment to target devices.

" }, "iotJobArn":{ "shape":"IoTJobARN", - "documentation":"

The ARN of the AWS IoT job that applies the deployment to target devices.

" + "documentation":"

The ARN of the IoT job that applies the deployment to target devices.

" }, "components":{ "shape":"ComponentDeploymentSpecifications", @@ -1492,7 +1518,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide.

" } } }, @@ -1520,7 +1546,7 @@ "documentation":"

Whether or not the component is a root component.

" } }, - "documentation":"

Contains information about a component on a AWS IoT Greengrass core device.

" + "documentation":"

Contains information about a component on a Greengrass core device.

" }, "InstalledComponentLifecycleState":{ "type":"string", @@ -1551,7 +1577,7 @@ "locationName":"Retry-After" } }, - "documentation":"

AWS IoT Greengrass can't process your request right now. Try again later.

", + "documentation":"

IoT Greengrass can't process your request right now. Try again later.

", "error":{"httpStatusCode":500}, "exception":true, "fault":true @@ -1744,7 +1770,7 @@ "documentation":"

The list of system devices that the container can access.

" } }, - "documentation":"

Contains information about a container in which AWS Lambda functions run on AWS IoT Greengrass core devices.

" + "documentation":"

Contains information about a container in which Lambda functions run on Greengrass core devices.

" }, "LambdaDeviceList":{ "type":"list", @@ -1788,10 +1814,10 @@ }, "type":{ "shape":"LambdaEventSourceType", - "documentation":"

The type of event source. Choose from the following options:

  • PUB_SUB – Subscribe to local publish/subscribe messages. This event source type doesn't support MQTT wildcards (+ and #) in the event source topic.

  • IOT_CORE – Subscribe to AWS IoT Core MQTT messages. This event source type supports MQTT wildcards (+ and #) in the event source topic.

" + "documentation":"

The type of event source. Choose from the following options:

  • PUB_SUB – Subscribe to local publish/subscribe messages. This event source type doesn't support MQTT wildcards (+ and #) in the event source topic.

  • IOT_CORE – Subscribe to Amazon Web Services IoT Core MQTT messages. This event source type supports MQTT wildcards (+ and #) in the event source topic.

" } }, - "documentation":"

Contains information about an event source for an AWS Lambda function. The event source defines the topics on which this Lambda function subscribes to receive messages that run the function.

" + "documentation":"

Contains information about a Lambda function's event source. The event source defines the topics on which this Lambda function subscribes to receive messages that run the function.

" }, "LambdaEventSourceList":{ "type":"list", @@ -1814,11 +1840,11 @@ "members":{ "eventSources":{ "shape":"LambdaEventSourceList", - "documentation":"

The list of event sources to which to subscribe to receive work messages. The Lambda function runs when it receives a message from an event source. You can subscribe this function to local publish/subscribe messages and AWS IoT Core MQTT messages.

" + "documentation":"

The list of event sources to which to subscribe to receive work messages. The Lambda function runs when it receives a message from an event source. You can subscribe this function to local publish/subscribe messages and Amazon Web Services IoT Core MQTT messages.

" }, "maxQueueSize":{ "shape":"OptionalInteger", - "documentation":"

The maximum size of the message queue for the Lambda function component. The AWS IoT Greengrass core stores messages in a FIFO (first-in-first-out) queue until it can run the Lambda function to consume each message.

", + "documentation":"

The maximum size of the message queue for the Lambda function component. The IoT Greengrass core stores messages in a FIFO (first-in-first-out) queue until it can run the Lambda function to consume each message.

", "box":true }, "maxInstancesCount":{ @@ -1828,7 +1854,7 @@ }, "maxIdleTimeInSeconds":{ "shape":"OptionalInteger", - "documentation":"

The maximum amount of time in seconds that a non-pinned Lambda function can idle before the AWS IoT Greengrass Core software stops its process.

", + "documentation":"

The maximum amount of time in seconds that a non-pinned Lambda function can idle before the IoT Greengrass Core software stops its process.

", "box":true }, "timeoutInSeconds":{ @@ -1843,7 +1869,7 @@ }, "pinned":{ "shape":"OptionalBoolean", - "documentation":"

Whether or not the Lambda function is pinned, or long-lived.

  • A pinned Lambda function starts when AWS IoT Greengrass starts and keeps running in its own container.

  • A non-pinned Lambda function starts only when it receives a work item and exists after it idles for maxIdleTimeInSeconds. If the function has multiple work items, the AWS IoT Greengrass Core software creates multiple instances of the function.

Default: true

", + "documentation":"

Whether or not the Lambda function is pinned, or long-lived.

  • A pinned Lambda function starts when IoT Greengrass starts and keeps running in its own container.

  • A non-pinned Lambda function starts only when it receives a work item and exits after it idles for maxIdleTimeInSeconds. If the function has multiple work items, the IoT Greengrass Core software creates multiple instances of the function.

Default: true

", "box":true }, "inputPayloadEncodingType":{ @@ -1863,7 +1889,7 @@ "documentation":"

The parameters for the Linux process that contains the Lambda function.

" } }, - "documentation":"

Contains parameters for a Lambda function that runs on AWS IoT Greengrass.

" + "documentation":"

Contains parameters for a Lambda function that runs on IoT Greengrass.

" }, "LambdaFilesystemPermission":{ "type":"string", @@ -1902,10 +1928,10 @@ }, "componentLambdaParameters":{ "shape":"LambdaExecutionParameters", - "documentation":"

The system and runtime parameters for the Lambda function as it runs on the AWS IoT Greengrass core device.

" + "documentation":"

The system and runtime parameters for the Lambda function as it runs on the Greengrass core device.

" } }, - "documentation":"

Contains information about an AWS Lambda function to import to create a component.

" + "documentation":"

Contains information about a Lambda function to import to create a component.

" }, "LambdaInputPayloadEncodingType":{ "type":"string", @@ -1926,14 +1952,14 @@ "members":{ "isolationMode":{ "shape":"LambdaIsolationMode", - "documentation":"

The isolation mode for the process that contains the Lambda function. The process can run in an isolated runtime environment inside the AWS IoT Greengrass container, or as a regular process outside any container.

Default: GreengrassContainer

" + "documentation":"

The isolation mode for the process that contains the Lambda function. The process can run in an isolated runtime environment inside the IoT Greengrass container, or as a regular process outside any container.

Default: GreengrassContainer

" }, "containerParams":{ "shape":"LambdaContainerParams", "documentation":"

The parameters for the container in which the Lambda function runs.

" } }, - "documentation":"

Contains parameters for a Linux process that contains an AWS Lambda function.

" + "documentation":"

Contains parameters for a Linux process that contains a Lambda function.

" }, "LambdaVolumeList":{ "type":"list", @@ -1960,11 +1986,11 @@ }, "addGroupOwner":{ "shape":"OptionalBoolean", - "documentation":"

Whether or not to add the AWS IoT Greengrass user group as an owner of the volume.

Default: false

", + "documentation":"

Whether or not to add the IoT Greengrass user group as an owner of the volume.

Default: false

", "box":true } }, - "documentation":"

Contains information about a volume that Linux processes in a container can access. When you define a volume, the AWS IoT Greengrass Core software mounts the source files to the destination inside the container.

" + "documentation":"

Contains information about a volume that Linux processes in a container can access. When you define a volume, the IoT Greengrass Core software mounts the source files to the destination inside the container.

" }, "LifecycleStateDetails":{ "type":"string", @@ -1977,7 +2003,7 @@ "members":{ "coreDeviceThingName":{ "shape":"IoTThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

", + "documentation":"

The name of the core device. This is also the name of the IoT thing.

", "location":"uri", "locationName":"coreDeviceThingName" }, @@ -2093,13 +2119,13 @@ "members":{ "thingGroupArn":{ "shape":"ThingGroupARN", - "documentation":"

The ARN of the AWS IoT thing group by which to filter. If you specify this parameter, the list includes only core devices that are members of this thing group.

", + "documentation":"

The ARN of the IoT thing group by which to filter. If you specify this parameter, the list includes only core devices that are members of this thing group.

", "location":"querystring", "locationName":"thingGroupArn" }, "status":{ "shape":"CoreDeviceStatus", - "documentation":"

The core device status by which to filter. If you specify this parameter, the list includes only core devices that have this status. Choose one of the following options:

  • HEALTHY – The AWS IoT Greengrass Core software and all components run on the core device without issue.

  • UNHEALTHY – The AWS IoT Greengrass Core software or a component is in a failed state on the core device.

", + "documentation":"

The core device status by which to filter. If you specify this parameter, the list includes only core devices that have this status. Choose one of the following options:

  • HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue.

  • UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device.

", "location":"querystring", "locationName":"status" }, @@ -2137,7 +2163,7 @@ "members":{ "targetArn":{ "shape":"TargetARN", - "documentation":"

The ARN of the target AWS IoT thing or thing group.

", + "documentation":"

The ARN of the target IoT thing or thing group.

", "location":"querystring", "locationName":"targetArn" }, @@ -2182,7 +2208,7 @@ "members":{ "coreDeviceThingName":{ "shape":"CoreDeviceThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

", + "documentation":"

The name of the core device. This is also the name of the IoT thing.

", "location":"uri", "locationName":"coreDeviceThingName" }, @@ -2221,7 +2247,7 @@ "members":{ "coreDeviceThingName":{ "shape":"CoreDeviceThingName", - "documentation":"

The name of the core device. This is also the name of the AWS IoT thing.

", + "documentation":"

The name of the core device. This is also the name of the IoT thing.

", "location":"uri", "locationName":"coreDeviceThingName" }, @@ -2271,10 +2297,15 @@ "members":{ "tags":{ "shape":"TagMap", - "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide.

" } } }, + "Memory":{ + "type":"long", + "max":9223372036854771712, + "min":0 + }, "NextTokenString":{"type":"string"}, "NonEmptyString":{ "type":"string", @@ -2298,6 +2329,16 @@ "YAML" ] }, + "RequestAlreadyInProgressException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request is already in progress. This exception occurs when you use a client token for multiple requests while IoT Greengrass is still processing an earlier request that uses the same client token.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "ResolveComponentCandidatesRequest":{ "type":"structure", "required":[ @@ -2344,7 +2385,7 @@ "documentation":"

The recipe of the component version.

" } }, - "documentation":"

Contains information about a component version that is compatible to run on a AWS IoT Greengrass core device.

" + "documentation":"

Contains information about a component version that is compatible to run on a Greengrass core device.

" }, "ResolvedComponentVersionsList":{ "type":"list", @@ -2409,6 +2450,20 @@ "key":{"shape":"NonEmptyString"}, "value":{"shape":"NonEmptyString"} }, + "SystemResourceLimits":{ + "type":"structure", + "members":{ + "memory":{ + "shape":"Memory", + "documentation":"

The maximum amount of RAM, expressed in kilobytes, that a component's processes can use on the core device.

" + }, + "cpus":{ + "shape":"CPU", + "documentation":"

The maximum amount of CPU time that a component's processes can use on the core device. A core device's total CPU time is equivalent to the device's number of CPU cores. For example, on a core device with 4 CPU cores, you can set this value to 2 to limit the component's processes to 50 percent usage of each CPU core. On a device with 1 CPU core, you can set this value to 0.25 to limit the component's processes to 25 percent usage of the CPU. If you set this value to a number greater than the number of CPU cores, the IoT Greengrass Core software doesn't limit the component's CPU usage.

" + } + }, + "documentation":"

Contains information about system resource limits that the IoT Greengrass Core software applies to a component's processes. For more information, see Configure system resource limits for components.

" + }, "TagKey":{ "type":"string", "max":128, @@ -2443,7 +2498,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide.

" } } }, @@ -2566,5 +2621,5 @@ ] } }, - "documentation":"

AWS IoT Greengrass brings local compute, messaging, data management, sync, and ML inference capabilities to edge devices. This enables devices to collect and analyze data closer to the source of information, react autonomously to local events, and communicate securely with each other on local networks. Local devices can also communicate securely with AWS IoT Core and export IoT data to the AWS Cloud. AWS IoT Greengrass developers can use AWS Lambda functions and components to create and deploy applications to fleets of edge devices for local operation.

AWS IoT Greengrass Version 2 provides a new major version of the AWS IoT Greengrass Core software, new APIs, and a new console. Use this API reference to learn how to use the AWS IoT Greengrass V2 API operations to manage components, manage deployments, and core devices.

For more information, see What is AWS IoT Greengrass? in the AWS IoT Greengrass V2 Developer Guide.

" + "documentation":"

IoT Greengrass brings local compute, messaging, data management, sync, and ML inference capabilities to edge devices. This enables devices to collect and analyze data closer to the source of information, react autonomously to local events, and communicate securely with each other on local networks. Local devices can also communicate securely with Amazon Web Services IoT Core and export IoT data to the Amazon Web Services Cloud. IoT Greengrass developers can use Lambda functions and components to create and deploy applications to fleets of edge devices for local operation.

IoT Greengrass Version 2 provides a new major version of the IoT Greengrass Core software, new APIs, and a new console. Use this API reference to learn how to use the IoT Greengrass V2 API operations to manage components, manage deployments, and core devices.

For more information, see What is IoT Greengrass? in the IoT Greengrass V2 Developer Guide.

" } diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index 84fafd5b4758..fc7359b54368 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index e46ddd070b9d..b9f5eda5851c 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 guardduty diff --git a/services/health/pom.xml b/services/health/pom.xml index 2a4805b01e11..4fbb434a3d96 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/health/src/main/resources/codegen-resources/service-2.json b/services/health/src/main/resources/codegen-resources/service-2.json index cf20ec7ae542..4f1a0bd85331 100644 --- a/services/health/src/main/resources/codegen-resources/service-2.json +++ b/services/health/src/main/resources/codegen-resources/service-2.json @@ -307,7 +307,7 @@ "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" }, "maxResults":{ - "shape":"maxResults", + "shape":"maxResultsLowerRange", "documentation":"

The maximum number of items to return in one batch, between 10 and 100, inclusive.

" } } @@ -533,7 +533,7 @@ "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" }, "maxResults":{ - "shape":"maxResults", + "shape":"maxResultsLowerRange", "documentation":"

The maximum number of items to return in one batch, between 10 and 100, inclusive.

" }, "locale":{ @@ -1120,7 +1120,7 @@ "entityArnList":{ "type":"list", "member":{"shape":"entityArn"}, - "max":100, + "max":99, "min":1 }, "entityStatusCode":{ @@ -1146,7 +1146,7 @@ "entityValueList":{ "type":"list", "member":{"shape":"entityValue"}, - "max":100, + "max":99, "min":1 }, "eventAggregateField":{ @@ -1239,6 +1239,11 @@ "max":100, "min":10 }, + "maxResultsLowerRange":{ + "type":"integer", + "max":100, + "min":1 + }, "metadataKey":{ "type":"string", "max":32766 diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index 6c50317c94cd..6f7ac1e553f6 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/healthlake/src/main/resources/codegen-resources/paginators-1.json b/services/healthlake/src/main/resources/codegen-resources/paginators-1.json index c134891ac251..d103dbc646cf 100644 --- a/services/healthlake/src/main/resources/codegen-resources/paginators-1.json +++ b/services/healthlake/src/main/resources/codegen-resources/paginators-1.json @@ -4,6 +4,16 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListFHIRExportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListFHIRImportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/healthlake/src/main/resources/codegen-resources/service-2.json b/services/healthlake/src/main/resources/codegen-resources/service-2.json index 2a8fffc7e263..99777b5cfb48 100644 --- a/services/healthlake/src/main/resources/codegen-resources/service-2.json +++ b/services/healthlake/src/main/resources/codegen-resources/service-2.json @@ -25,6 +25,7 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, 
{"shape":"InternalServerException"} ], "documentation":"

Creates a Data Store that can ingest and export FHIR formatted data.

" @@ -93,7 +94,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Displays the properties of a FHIR import job, including the ID, ARN, name, and the status of the job.

" + "documentation":"

Displays the properties of a FHIR import job, including the ID, ARN, name, and the status of the job.

" }, "ListFHIRDatastores":{ "name":"ListFHIRDatastores", @@ -110,6 +111,54 @@ ], "documentation":"

Lists all FHIR Data Stores that are in the user’s account, regardless of Data Store status.

" }, + "ListFHIRExportJobs":{ + "name":"ListFHIRExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFHIRExportJobsRequest"}, + "output":{"shape":"ListFHIRExportJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all FHIR export jobs associated with an account and their statuses.

" + }, + "ListFHIRImportJobs":{ + "name":"ListFHIRImportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFHIRImportJobsRequest"}, + "output":{"shape":"ListFHIRImportJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all FHIR import jobs associated with an account and their statuses.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a list of all existing tags associated with a Data Store.

" + }, "StartFHIRExportJob":{ "name":"StartFHIRExportJob", "http":{ @@ -143,6 +192,34 @@ {"shape":"InternalServerException"} ], "documentation":"

Begins a FHIR Import job.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds a user specified key and value tag to a Data Store.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes tags from a Data Store.

" } }, "shapes":{ @@ -154,6 +231,12 @@ "documentation":"

Access is denied. Your account is not authorized to perform this operation.

", "exception":true }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"^arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:healthlake:[a-z0-9-]+:\\d{12}:datastore\\/fhir\\/.{32}" + }, "BoundedLengthString":{ "type":"string", "max":5000, @@ -166,6 +249,13 @@ "min":1, "pattern":"^[a-zA-Z0-9-]+$" }, + "CmkType":{ + "type":"string", + "enum":[ + "CUSTOMER_MANAGED_KMS_KEY", + "AWS_OWNED_KMS_KEY" + ] + }, "ConflictException":{ "type":"structure", "members":{ @@ -186,6 +276,10 @@ "shape":"FHIRVersion", "documentation":"

The FHIR version of the Data Store. The only supported version is R4.

" }, + "SseConfiguration":{ + "shape":"SseConfiguration", + "documentation":"

The server-side encryption key configuration for a customer provided encryption key specified for creating a Data Store.

" + }, "PreloadDataConfig":{ "shape":"PreloadDataConfig", "documentation":"

Optional parameter to preload data upon creation of the Data Store. Currently, the only supported preloaded data is synthetic data generated from Synthea.

" @@ -194,6 +288,10 @@ "shape":"ClientTokenString", "documentation":"

Optional user provided token used for ensuring idempotency.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Resource tags that are applied to a Data Store when it is created.

" } } }, @@ -300,6 +398,10 @@ "shape":"String", "documentation":"

The AWS endpoint for the Data Store. Each Data Store will have its own endpoint with Data Store ID in the endpoint URL.

" }, + "SseConfiguration":{ + "shape":"SseConfiguration", + "documentation":"

The server-side encryption key configuration for a customer provided encryption key (CMK).

" + }, "PreloadDataConfig":{ "shape":"PreloadDataConfig", "documentation":"

The preloaded data configuration for the Data Store. Only data preloaded from Synthea is supported.

" @@ -429,6 +531,12 @@ } } }, + "EncryptionKeyID":{ + "type":"string", + "max":400, + "min":1, + "pattern":"(arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:kms:)?([a-z]{2}-[a-z]+(-[a-z]+)?-\\d:)?(\\d{12}:)?(((key/)?[a-zA-Z0-9-_]+)|(alias/[a-zA-Z0-9:/_-]+))" + }, "ExportJobProperties":{ "type":"structure", "required":[ @@ -478,6 +586,10 @@ }, "documentation":"

The properties of a FHIR export job, including the ID, ARN, name, and the status of the job.

" }, + "ExportJobPropertiesList":{ + "type":"list", + "member":{"shape":"ExportJobProperties"} + }, "FHIRVersion":{ "type":"string", "enum":["R4"] @@ -526,6 +638,7 @@ "shape":"InputDataConfig", "documentation":"

The input data configuration that was supplied when the Import job was created.

" }, + "JobOutputDataConfig":{"shape":"OutputDataConfig"}, "DataAccessRoleArn":{ "shape":"IamRoleArn", "documentation":"

The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your input data.

" @@ -537,6 +650,10 @@ }, "documentation":"

Displays the properties of the import job, including the ID, Arn, Name, and the status of the Data Store.

" }, + "ImportJobPropertiesList":{ + "type":"list", + "member":{"shape":"ImportJobProperties"} + }, "InputDataConfig":{ "type":"structure", "members":{ @@ -574,10 +691,26 @@ "enum":[ "SUBMITTED", "IN_PROGRESS", + "COMPLETED_WITH_ERRORS", "COMPLETED", "FAILED" ] }, + "KmsEncryptionConfig":{ + "type":"structure", + "required":["CmkType"], + "members":{ + "CmkType":{ + "shape":"CmkType", + "documentation":"

The type of customer-managed-key (CMK) used for encryption. The two types of supported CMKs are customer owned CMKs and AWS owned CMKs.

" + }, + "KmsKeyId":{ + "shape":"EncryptionKeyID", + "documentation":"

The KMS encryption key id/alias used to encrypt the Data Store contents at rest.

" + } + }, + "documentation":"

The customer-managed-key(CMK) used when creating a Data Store. If a customer owned key is not specified, an AWS owned key will be used for encryption.

" + }, "ListFHIRDatastoresRequest":{ "type":"structure", "members":{ @@ -609,6 +742,121 @@ } } }, + "ListFHIRExportJobsRequest":{ + "type":"structure", + "required":["DatastoreId"], + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

This parameter limits the response to the export job with the specified Data Store ID.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token used to identify the next page of results to return for a ListFHIRExportJobs query.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

This parameter limits the number of results returned for a ListFHIRExportJobs to a maximum quantity specified by the user.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

This parameter limits the response to the export job with the specified job name.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

This parameter limits the response to the export jobs with the specified job status.

" + }, + "SubmittedBefore":{ + "shape":"Timestamp", + "documentation":"

This parameter limits the response to FHIR export jobs submitted before a user specified date.

" + }, + "SubmittedAfter":{ + "shape":"Timestamp", + "documentation":"

This parameter limits the response to FHIR export jobs submitted after a user specified date.

" + } + } + }, + "ListFHIRExportJobsResponse":{ + "type":"structure", + "required":["ExportJobPropertiesList"], + "members":{ + "ExportJobPropertiesList":{ + "shape":"ExportJobPropertiesList", + "documentation":"

The properties of listed FHIR export jobs, including the ID, ARN, name, and the status of the job.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token used to identify the next page of results to return for a ListFHIRExportJobs query.

" + } + } + }, + "ListFHIRImportJobsRequest":{ + "type":"structure", + "required":["DatastoreId"], + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

This parameter limits the response to the import job with the specified Data Store ID.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token used to identify the next page of results to return for a ListFHIRImportJobs query.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

This parameter limits the number of results returned for a ListFHIRImportJobs to a maximum quantity specified by the user.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

This parameter limits the response to the import job with the specified job name.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

This parameter limits the response to the import job with the specified job status.

" + }, + "SubmittedBefore":{ + "shape":"Timestamp", + "documentation":"

This parameter limits the response to FHIR import jobs submitted before a user specified date.

" + }, + "SubmittedAfter":{ + "shape":"Timestamp", + "documentation":"

This parameter limits the response to FHIR import jobs submitted after a user specified date.

" + } + } + }, + "ListFHIRImportJobsResponse":{ + "type":"structure", + "required":["ImportJobPropertiesList"], + "members":{ + "ImportJobPropertiesList":{ + "shape":"ImportJobPropertiesList", + "documentation":"

The properties of a listed FHIR import jobs, including the ID, ARN, name, and the status of the job.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token used to identify the next page of results to return for a ListFHIRImportJobs query.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name(ARN) of the Data Store for which tags are being added.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

Returns a list of tags associated with a Data Store.

" + } + } + }, "MaxResultsInteger":{ "type":"integer", "max":500, @@ -628,9 +876,9 @@ "OutputDataConfig":{ "type":"structure", "members":{ - "S3Uri":{ - "shape":"S3Uri", - "documentation":"

The S3Uri is the user specified S3 location to which data will be exported from a FHIR Data Store.

" + "S3Configuration":{ + "shape":"S3Configuration", + "documentation":"

The output data configuration that was supplied when the export job was created.

" } }, "documentation":"

The output data configuration that was supplied when the export job was created.

", @@ -659,11 +907,40 @@ "documentation":"

The requested Data Store was not found.

", "exception":true }, + "S3Configuration":{ + "type":"structure", + "required":[ + "S3Uri", + "KmsKeyId" + ], + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.

" + }, + "KmsKeyId":{ + "shape":"EncryptionKeyID", + "documentation":"

The KMS key ID used to access the S3 bucket.

" + } + }, + "documentation":"

The configuration of the S3 bucket for either an import or export job. This includes assigning permissions for access.

" + }, "S3Uri":{ "type":"string", "max":1024, "pattern":"s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" }, + "SseConfiguration":{ + "type":"structure", + "required":["KmsEncryptionConfig"], + "members":{ + "KmsEncryptionConfig":{ + "shape":"KmsEncryptionConfig", + "documentation":"

The KMS encryption configuration used to provide details for data encryption.

" + } + }, + "documentation":"

The server-side encryption key configuration for a customer provided encryption key.

" + }, "StartFHIRExportJobRequest":{ "type":"structure", "required":[ @@ -721,6 +998,7 @@ "type":"structure", "required":[ "InputDataConfig", + "JobOutputDataConfig", "DatastoreId", "DataAccessRoleArn", "ClientToken" @@ -734,6 +1012,7 @@ "shape":"InputDataConfig", "documentation":"

The input properties of the FHIR Import job in the StartFHIRImport job request.

" }, + "JobOutputDataConfig":{"shape":"OutputDataConfig"}, "DatastoreId":{ "shape":"DatastoreId", "documentation":"

The AWS-generated Data Store ID.

" @@ -775,6 +1054,70 @@ "max":10000, "pattern":"[\\P{M}\\p{M}]{0,10000}" }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key portion of a tag. Tag keys are case sensitive.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value portion of tag. Tag values are case sensitive.

" + } + }, + "documentation":"

A tag is a label consisting of a user-defined key and value. The form for tags is {\"Key\", \"Value\"}

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) that gives Amazon HealthLake access to the Data Store which tags are being added to.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The user specified key and value pair tags being added to a Data Store.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -784,6 +1127,28 @@ "exception":true }, "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the Data Store for which tags are being removed.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys for the tags to be removed from the Healthlake Data Store.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index cbdd952f0c2f..0755963fb7a9 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 4d9d3f57fda8..9a49eca4a7d3 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/iam/src/main/resources/codegen-resources/service-2.json b/services/iam/src/main/resources/codegen-resources/service-2.json index b54fae4af90c..fa18c51596ea 100644 --- a/services/iam/src/main/resources/codegen-resources/service-2.json +++ b/services/iam/src/main/resources/codegen-resources/service-2.json @@ -233,7 +233,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

When you create the IAM OIDC provider, you specify the following:

  • The URL of the OIDC identity provider (IdP) to trust

  • A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider

  • A list of thumbprints of one or more server certificates that the IdP uses

You get all of this information from the OIDC IdP that you want to use to access Amazon Web Services.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

" + "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

When you create the IAM OIDC provider, you specify the following:

  • The URL of the OIDC identity provider (IdP) to trust

  • A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider

  • A list of thumbprints of one or more server certificates that the IdP uses

You get all of this information from the OIDC IdP that you want to use to access Amazon Web Services.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

" }, "CreatePolicy":{ "name":"CreatePolicy", @@ -2402,7 +2402,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)

Typically, you need to update a thumbprint only when the identity provider's certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

Trust for the OIDC provider is derived from the provider's certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.

" + "documentation":"

Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)

Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.

Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.

" }, "UpdateRole":{ "name":"UpdateRole", diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 7ea3b4c6fc4f..d071d0d80127 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/identitystore/src/main/resources/codegen-resources/service-2.json b/services/identitystore/src/main/resources/codegen-resources/service-2.json index d5137d37bf90..ca781c64f02b 100644 --- a/services/identitystore/src/main/resources/codegen-resources/service-2.json +++ b/services/identitystore/src/main/resources/codegen-resources/service-2.json @@ -111,7 +111,7 @@ "members":{ "IdentityStoreId":{ "shape":"IdentityStoreId", - "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string which contains number and lower case letters. This value is generated at the time that a new identity store is created.

" + "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string that contains number and lower case letters. This value is generated at the time that a new identity store is created.

" }, "GroupId":{ "shape":"ResourceId", @@ -132,7 +132,7 @@ }, "DisplayName":{ "shape":"GroupDisplayName", - "documentation":"

Contains the group’s display name value. The length limit is 1024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space and non breaking space in this attribute. The characters “<>;:%” are excluded. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.

" + "documentation":"

Contains the group’s display name value. The length limit is 1,024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and nonbreaking space in this attribute. The characters <>;:% are excluded. This value is specified at the time that the group is created and stored as an attribute of the group object in the identity store.

" } } }, @@ -145,7 +145,7 @@ "members":{ "IdentityStoreId":{ "shape":"IdentityStoreId", - "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string which contains number and lower case letters. This value is generated at the time that a new identity store is created.

" + "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string that contains number and lower case letters. This value is generated at the time that a new identity store is created.

" }, "UserId":{ "shape":"ResourceId", @@ -162,7 +162,7 @@ "members":{ "UserName":{ "shape":"UserName", - "documentation":"

Contains the user’s username value. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers and punctuation. The characters “<>;:%” are excluded. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store.

" + "documentation":"

Contains the user’s user name value. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers, and punctuation. The characters <>;:% are excluded. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store.

" }, "UserId":{ "shape":"ResourceId", @@ -179,7 +179,7 @@ "members":{ "AttributePath":{ "shape":"AttributePath", - "documentation":"

The attribute path used to specify which attribute name to search. Length limit is 255 characters. For example, UserName is a valid attribute path for the ListUsers API, and DisplayName is a valid attribute path for the ListGroups API.

" + "documentation":"

The attribute path that is used to specify which attribute name to search. Length limit is 255 characters. For example, UserName is a valid attribute path for the ListUsers API, and DisplayName is a valid attribute path for the ListGroups API.

" }, "AttributeValue":{ "shape":"SensitiveStringType", @@ -205,7 +205,7 @@ }, "DisplayName":{ "shape":"GroupDisplayName", - "documentation":"

Contains the group’s display name value. The length limit is 1024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space and non breaking space in this attribute. The characters “<>;:%” are excluded. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.

" + "documentation":"

Contains the group’s display name value. The length limit is 1,024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and nonbreaking space in this attribute. The characters <>;:% are excluded. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.

" } }, "documentation":"

A group object, which contains a specified group’s metadata and attributes.

" @@ -245,15 +245,15 @@ "members":{ "IdentityStoreId":{ "shape":"IdentityStoreId", - "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string which contains number and lower case letters. This value is generated at the time that a new identity store is created.

" + "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string that contains number and lower case letters. This value is generated at the time that a new identity store is created.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of results to be returned per request, which is used in the ListUsers and ListGroups request to specify how many results to return in one page. The length limit is 50 characters.

" + "documentation":"

The maximum number of results to be returned per request. This parameter is used in the ListUsers and ListGroups request to specify how many results to return in one page. The length limit is 50 characters.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

The pagination token used for the ListUsers and ListGroups APIs. This value is generated by the identity store service and is returned in the API response if the total results are more than the size of one page, and when this token is used in the API request to search for the next page.

" + "documentation":"

The pagination token used for the ListUsers and ListGroups API operations. This value is generated by the identity store service. It is returned in the API response if the total results are more than the size of one page. This token is also returned when it is used in the API request to search for the next page.

" }, "Filters":{ "shape":"Filters", @@ -271,7 +271,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The pagination token used for the ListUsers and ListGroups APIs. This value is generated by the identity store service and is returned in the API response if the total results are more than the size of one page, and when this token is used in the API request to search for the next page.

" + "documentation":"

The pagination token used for the ListUsers and ListGroups API operations. This value is generated by the identity store service. It is returned in the API response if the total results are more than the size of one page. This token is also returned when it is used in the API request to search for the next page.

" } } }, @@ -281,15 +281,15 @@ "members":{ "IdentityStoreId":{ "shape":"IdentityStoreId", - "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string which contains number and lower case letters. This value is generated at the time that a new identity store is created.

" + "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string that contains number and lower case letters. This value is generated at the time that a new identity store is created.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of results to be returned per request, which is used in the ListUsers and ListGroups request to specify how many results to return in one page. The length limit is 50 characters.

" + "documentation":"

The maximum number of results to be returned per request. This parameter is used in the ListUsers and ListGroups request to specify how many results to return in one page. The length limit is 50 characters.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

The pagination token used for the ListUsers and ListGroups APIs. This value is generated by the identity store service and is returned in the API response if the total results are more than the size of one page, and when this token is used in the API request to search for the next page.

" + "documentation":"

The pagination token used for the ListUsers and ListGroups API operations. This value is generated by the identity store service. It is returned in the API response if the total results are more than the size of one page. This token is also returned when it is used in the API request to search for the next page.

" }, "Filters":{ "shape":"Filters", @@ -307,7 +307,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The pagination token used for the ListUsers and ListGroups APIs. This value is generated by the identity store service and is returned in the API response if the total results are more than the size of one page, and when this token is used in the API request to search for the next page.

" + "documentation":"

The pagination token used for the ListUsers and ListGroups API operations. This value is generated by the identity store service. It is returned in the API response if the total results are more than the size of one page. This token is also returned when it is used in the API request to search for the next page.

" } } }, @@ -393,7 +393,7 @@ "members":{ "UserName":{ "shape":"UserName", - "documentation":"

Contains the user’s username value. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers and punctuation. The characters “<>;:%” are excluded. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store.

" + "documentation":"

Contains the user’s user name value. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers, and punctuation. The characters <>;:% are excluded. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store.

" }, "UserId":{ "shape":"ResourceId", @@ -425,5 +425,6 @@ "documentation":"

The request failed because it contains a syntax error.

", "exception":true } - } + }, + "documentation":"

The AWS Single Sign-On (SSO) Identity Store service provides a single place to retrieve all of your identities (users and groups). For more information about AWS, see the AWS Single Sign-On User Guide.

" } diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index da23f597c9cc..220bda19718f 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json index 1db5031b1e9c..2b87ce512051 100644 --- a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json +++ b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json @@ -560,7 +560,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns the list of component build versions for the specified semantic version.

" + "documentation":"

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: When you retrieve or reference a resource with a semantic version, you can use wildcards (x) to filter your results. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards. For example, specifying \"1.2.x\", or \"1.x.x\" works to filter list results, but neither \"1.x.2\", nor \"x.2.x\" will work. You do not have to specify the build - Image Builder automatically uses a wildcard for that, if applicable.

" }, "ListComponents":{ "name":"ListComponents", @@ -579,7 +579,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns the list of component build versions for the specified semantic version.

" + "documentation":"

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: When you retrieve or reference a resource with a semantic version, you can use wildcards (x) to filter your results. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards. For example, specifying \"1.2.x\", or \"1.x.x\" works to filter list results, but neither \"1.x.2\", nor \"x.2.x\" will work. You do not have to specify the build - Image Builder automatically uses a wildcard for that, if applicable.

" }, "ListContainerRecipes":{ "name":"ListContainerRecipes", @@ -992,7 +992,7 @@ "members":{ "region":{ "shape":"NonEmptyString", - "documentation":"

The Region of the Amazon EC2 AMI.

" + "documentation":"

The Amazon Web Services Region of the Amazon EC2 AMI.

" }, "image":{ "shape":"NonEmptyString", @@ -1039,7 +1039,7 @@ }, "launchPermission":{ "shape":"LaunchPermissionConfiguration", - "documentation":"

Launch permissions can be used to configure which accounts can use the AMI to launch instances.

" + "documentation":"

Launch permissions can be used to configure which Amazon Web Services accounts can use the AMI to launch instances.

" } }, "documentation":"

Define and configure the output AMIs of the pipeline.

" @@ -1355,7 +1355,7 @@ "members":{ "arn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the component.

" + "documentation":"

The Amazon Resource Name (ARN) of the component.

Semantic versioning is included in each object's Amazon Resource Name (ARN), at the level that applies to that object as follows:

  1. Versionless ARNs and Name ARNs do not include specific values in any of the nodes. The nodes are either left off entirely, or they are specified as wildcards, for example: x.x.x.

  2. Version ARNs have only the first three nodes: <major>.<minor>.<patch>

  3. Build version ARNs have all four nodes, and point to a specific build for a specific version of an object.

" }, "name":{ "shape":"ResourceName", @@ -1363,7 +1363,7 @@ }, "version":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the component.

" + "documentation":"

The semantic version of the component.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number, and that is not open for updates.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: When you retrieve or reference a resource with a semantic version, you can use wildcards (x) to filter your results. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards. For example, specifying \"1.2.x\", or \"1.x.x\" works to filter list results, but neither \"1.x.2\", nor \"x.2.x\" will work. You do not have to specify the build - Image Builder automatically uses a wildcard for that, if applicable.

" }, "description":{ "shape":"NonEmptyString", @@ -1390,7 +1390,7 @@ "documentation":"

The date that the component was created.

" } }, - "documentation":"

A high-level overview of a component semantic version.

" + "documentation":"

The defining characteristics of a specific version of an Amazon Web Services TOE component.

" }, "ComponentVersionArn":{ "type":"string", @@ -1446,7 +1446,7 @@ "members":{ "arn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the container recipe.

" + "documentation":"

The Amazon Resource Name (ARN) of the container recipe.

Semantic versioning is included in each object's Amazon Resource Name (ARN), at the level that applies to that object as follows:

  1. Versionless ARNs and Name ARNs do not include specific values in any of the nodes. The nodes are either left off entirely, or they are specified as wildcards, for example: x.x.x.

  2. Version ARNs have only the first three nodes: <major>.<minor>.<patch>

  3. Build version ARNs have all four nodes, and point to a specific build for a specific version of an object.

" }, "containerType":{ "shape":"ContainerType", @@ -1470,7 +1470,7 @@ }, "version":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the container recipe (<major>.<minor>.<patch>).

" + "documentation":"

The semantic version of the container recipe.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number, and that is not open for updates.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: When you retrieve or reference a resource with a semantic version, you can use wildcards (x) to filter your results. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards. For example, specifying \"1.2.x\", or \"1.x.x\" works to filter list results, but neither \"1.x.2\", nor \"x.2.x\" will work. You do not have to specify the build - Image Builder automatically uses a wildcard for that, if applicable.

" }, "components":{ "shape":"ComponentConfigurationList", @@ -1584,7 +1584,7 @@ }, "semanticVersion":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the component. This version follows the semantic version syntax. For example, major.minor.patch. This could be versioned like software (2.0.1) or like a date (2019.12.01).

" + "documentation":"

The semantic version of the component. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number, and that is not open for updates.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

" }, "description":{ "shape":"NonEmptyString", @@ -1668,7 +1668,7 @@ }, "semanticVersion":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the container recipe (<major>.<minor>.<patch>).

" + "documentation":"

The semantic version of the container recipe. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number, and that is not open for updates.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

" }, "components":{ "shape":"ComponentConfigurationList", @@ -1882,7 +1882,7 @@ }, "semanticVersion":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the image recipe.

" + "documentation":"

The semantic version of the image recipe. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number, and that is not open for updates.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

" }, "components":{ "shape":"ComponentConfigurationList", @@ -2761,7 +2761,7 @@ "members":{ "arn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the image.

" + "documentation":"

The Amazon Resource Name (ARN) of the image.

Semantic versioning is included in each object's Amazon Resource Name (ARN), at the level that applies to that object as follows:

  1. Versionless ARNs and Name ARNs do not include specific values in any of the nodes. The nodes are either left off entirely, or they are specified as wildcards, for example: x.x.x.

  2. Version ARNs have only the first three nodes: <major>.<minor>.<patch>

  3. Build version ARNs have all four nodes, and point to a specific build for a specific version of an object.

" }, "type":{ "shape":"ImageType", @@ -2773,7 +2773,7 @@ }, "version":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the image.

" + "documentation":"

The semantic version of the image.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number, and that is not open for updates.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: When you retrieve or reference a resource with a semantic version, you can use wildcards (x) to filter your results. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards. For example, specifying \"1.2.x\", or \"1.x.x\" works to filter list results, but neither \"1.x.2\", nor \"x.2.x\" will work. You do not have to specify the build - Image Builder automatically uses a wildcard for that, if applicable.

" }, "platform":{ "shape":"Platform", @@ -2797,7 +2797,7 @@ }, "containerRecipe":{ "shape":"ContainerRecipe", - "documentation":"

The container recipe used to create the container image type.

" + "documentation":"

The recipe that is used to create an Image Builder container image.

" }, "sourcePipelineName":{ "shape":"ResourceName", @@ -2832,7 +2832,7 @@ "documentation":"

The tags of the image.

" } }, - "documentation":"

An image build version.

" + "documentation":"

An Image Builder image. You must specify exactly one recipe for the image – either a container recipe (containerRecipe), which creates a container image, or an image recipe (imageRecipe), which creates an AMI.

" }, "ImageBuildVersionArn":{ "type":"string", @@ -3161,38 +3161,38 @@ "members":{ "arn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the image semantic version.

" + "documentation":"

The Amazon Resource Name (ARN) of a specific version of an Image Builder image.

Semantic versioning is included in each object's Amazon Resource Name (ARN), at the level that applies to that object as follows:

  1. Versionless ARNs and Name ARNs do not include specific values in any of the nodes. The nodes are either left off entirely, or they are specified as wildcards, for example: x.x.x.

  2. Version ARNs have only the first three nodes: <major>.<minor>.<patch>

  3. Build version ARNs have all four nodes, and point to a specific build for a specific version of an object.

" }, "name":{ "shape":"ResourceName", - "documentation":"

The name of the image semantic version.

" + "documentation":"

The name of this specific version of an Image Builder image.

" }, "type":{ "shape":"ImageType", - "documentation":"

Specifies whether this is an AMI or container image.

" + "documentation":"

Specifies whether this image is an AMI or a container image.

" }, "version":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the image semantic version.

" + "documentation":"

Details for a specific version of an Image Builder image. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number, and that is not open for updates.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: When you retrieve or reference a resource with a semantic version, you can use wildcards (x) to filter your results. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards. For example, specifying \"1.2.x\", or \"1.x.x\" works to filter list results, but neither \"1.x.2\", nor \"x.2.x\" will work. You do not have to specify the build - Image Builder automatically uses a wildcard for that, if applicable.

" }, "platform":{ "shape":"Platform", - "documentation":"

The platform of the image semantic version.

" + "documentation":"

The platform of the image version, for example \"Windows\" or \"Linux\".

" }, "osVersion":{ "shape":"OsVersion", - "documentation":"

The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.

" + "documentation":"

The operating system version of the Amazon EC2 build instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.

" }, "owner":{ "shape":"NonEmptyString", - "documentation":"

The owner of the image semantic version.

" + "documentation":"

The owner of the image version.

" }, "dateCreated":{ "shape":"DateTime", - "documentation":"

The date at which this image semantic version was created.

" + "documentation":"

The date on which this specific version of the Image Builder image was created.

" } }, - "documentation":"

An image semantic version.

" + "documentation":"

The defining characteristics of a specific version of an Image Builder image.

" }, "ImageVersionArn":{ "type":"string", @@ -3223,7 +3223,7 @@ }, "semanticVersion":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the component. This version follows the semantic version syntax. For example, major.minor.patch. This could be versioned like software (2.0.1) or like a date (2019.12.01).

" + "documentation":"

The semantic version of the component. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: When you retrieve or reference a resource with a semantic version, you can use wildcards (x) to filter your results. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards. For example, specifying \"1.2.x\", or \"1.x.x\" works to filter list results, but neither \"1.x.2\", nor \"x.2.x\" will work. You do not have to specify the build - Image Builder automatically uses a wildcard for that, if applicable.

" }, "description":{ "shape":"NonEmptyString", @@ -3235,7 +3235,7 @@ }, "type":{ "shape":"ComponentType", - "documentation":"

The type of the component denotes whether the component is used to build the image or only to test it.

" + "documentation":"

The type of the component denotes whether the component is used to build the image, or only to test it.

" }, "format":{ "shape":"ComponentFormat", @@ -3523,7 +3523,7 @@ "members":{ "userIds":{ "shape":"AccountList", - "documentation":"

The account ID.

" + "documentation":"

The Amazon Web Services account ID.

" }, "userGroups":{ "shape":"StringList", @@ -3616,11 +3616,11 @@ }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

Use the following filters to streamline results:

  • description

  • name

  • platform

  • supportedOsVersion

  • type

  • version

" }, "byName":{ "shape":"Boolean", - "documentation":"

Returns the list of component build versions for the specified semantic version.

" + "documentation":"

Returns the list of component build versions for the specified name.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3642,7 +3642,7 @@ }, "componentVersionList":{ "shape":"ComponentVersionList", - "documentation":"

The list of component semantic versions.

" + "documentation":"

The list of component semantic versions.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

" }, "nextToken":{ "shape":"PaginationToken", @@ -3659,7 +3659,7 @@ }, "filters":{ "shape":"FilterList", - "documentation":"

Request filters that are used to narrow the list of container images that are returned.

" + "documentation":"

Use the following filters to streamline results:

  • containerType

  • name

  • parentImage

  • platform

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3694,7 +3694,7 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • name - The name of this distribution configuration.

" + "documentation":"

You can filter on name to streamline results.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3734,7 +3734,7 @@ }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

Use the following filters to streamline results:

  • name

  • osVersion

  • platform

  • type

  • version

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3810,7 +3810,7 @@ }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

Use the following filters to streamline results:

  • name

  • version

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3845,7 +3845,7 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

Use the following filters to streamline results:

  • description

  • distributionConfigurationArn

  • imageRecipeArn

  • infrastructureConfigurationArn

  • name

  • status

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3884,7 +3884,7 @@ }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

Use the following filters to streamline results:

  • name

  • parentImage

  • platform

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3923,7 +3923,7 @@ }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

Use the following filters to streamline results:

  • name

  • osVersion

  • platform

  • type

  • version

" }, "byName":{ "shape":"Boolean", @@ -3953,7 +3953,7 @@ }, "imageVersionList":{ "shape":"ImageVersionList", - "documentation":"

The list of image semantic versions.

" + "documentation":"

The list of image semantic versions.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: When you retrieve or reference a resource with a semantic version, you can use wildcards (x) to filter your results. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards. For example, specifying \"1.2.x\", or \"1.x.x\" works to filter list results, but neither \"1.x.2\", nor \"x.2.x\" will work. You do not have to specify the build - Image Builder automatically uses a wildcard for that, if applicable.

" }, "nextToken":{ "shape":"PaginationToken", @@ -3966,7 +3966,7 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

You can filter on name to streamline results.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -4383,7 +4383,7 @@ "members":{ "uninstallAfterBuild":{ "shape":"NullableBoolean", - "documentation":"

This property defaults to true. If Image Builder installs the SSM agent on a build instance, it removes the agent before creating a snapshot for the AMI. To ensure that the AMI you create includes the SSM agent, set this property to false.

" + "documentation":"

Controls whether the SSM agent is removed from your final build image, prior to creating the new AMI. If this is set to true, then the agent is removed from the final image. If it's set to false, then the agent is left in, so that it is included in the new AMI. The default value is false.

" } }, "documentation":"

Contains settings for the SSM agent on your build instance.

" diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 445a966b7dfd..b3ce81da6ecd 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/iot/pom.xml b/services/iot/pom.xml index f06e3340aae8..d4991c235733 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot/src/main/resources/codegen-resources/service-2.json b/services/iot/src/main/resources/codegen-resources/service-2.json index ec2b5c1d07a2..e5b5ce92a5a7 100644 --- a/services/iot/src/main/resources/codegen-resources/service-2.json +++ b/services/iot/src/main/resources/codegen-resources/service-2.json @@ -94,7 +94,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Attaches a policy to the specified target.

" + "documentation":"

Attaches the specified policy to the specified principal (certificate or other credential).

" }, "AttachPrincipalPolicy":{ "name":"AttachPrincipalPolicy", @@ -7264,7 +7264,7 @@ "CreationDate":{"type":"timestamp"}, "CredentialDurationSeconds":{ "type":"integer", - "max":3600, + "max":43200, "min":900 }, "CustomCodeSigning":{ diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index 02d543736075..8fae191250a6 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index 28f329faa81f..f63907f7e888 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index 1227c3a73000..a0736017a124 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotanalytics/src/main/resources/codegen-resources/service-2.json b/services/iotanalytics/src/main/resources/codegen-resources/service-2.json index be2c6a806201..0e0bf9607b1c 100644 --- a/services/iotanalytics/src/main/resources/codegen-resources/service-2.json +++ b/services/iotanalytics/src/main/resources/codegen-resources/service-2.json @@ -63,7 +63,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a channel. A channel collects data from an MQTT topic and archives the raw, unprocessed messages before publishing the data to a pipeline.

" + "documentation":"

Used to create a channel. A channel collects data from an MQTT topic and archives the raw, unprocessed messages before publishing the data to a pipeline.

" }, "CreateDataset":{ "name":"CreateDataset", @@ -82,7 +82,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a dataset. A dataset stores data retrieved from a data store by applying a queryAction (a SQL query) or a containerAction (executing a containerized application). This operation creates the skeleton of a dataset. The dataset can be populated manually by calling CreateDatasetContent or automatically according to a trigger you specify.

" + "documentation":"

Used to create a dataset. A dataset stores data retrieved from a data store by applying a queryAction (a SQL query) or a containerAction (executing a containerized application). This operation creates the skeleton of a dataset. The dataset can be populated manually by calling CreateDatasetContent or automatically according to a trigger you specify.

" }, "CreateDatasetContent":{ "name":"CreateDatasetContent", @@ -99,7 +99,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates the content of a data set by applying a queryAction (a SQL query) or a containerAction (executing a containerized application).

" + "documentation":"

Creates the content of a dataset by applying a queryAction (a SQL query) or a containerAction (executing a containerized application).

" }, "CreateDatastore":{ "name":"CreateDatastore", @@ -118,7 +118,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a data store, which is a repository for messages. Only data stores that are used to save pipeline data can be configured with ParquetConfiguration.

" + "documentation":"

Creates a data store, which is a repository for messages.

" }, "CreatePipeline":{ "name":"CreatePipeline", @@ -290,7 +290,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the current settings of the AWS IoT Analytics logging options.

" + "documentation":"

Retrieves the current settings of the IoT Analytics logging options.

" }, "DescribePipeline":{ "name":"DescribePipeline", @@ -324,7 +324,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the contents of a data set as presigned URIs.

" + "documentation":"

Retrieves the contents of a dataset as presigned URIs.

" }, "ListChannels":{ "name":"ListChannels", @@ -357,7 +357,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists information about data set contents that have been created.

" + "documentation":"

Lists information about dataset contents that have been created.

" }, "ListDatasets":{ "name":"ListDatasets", @@ -373,7 +373,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves information about data sets.

" + "documentation":"

Retrieves information about datasets.

" }, "ListDatastores":{ "name":"ListDatastores", @@ -438,7 +438,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Sets or updates the AWS IoT Analytics logging options.

If you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy), it takes up to five minutes for that change to take effect.

" + "documentation":"

Sets or updates the IoT Analytics logging options.

If you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy), it takes up to five minutes for that change to take effect.

" }, "RunPipelineActivity":{ "name":"RunPipelineActivity", @@ -543,7 +543,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the settings of a channel.

" + "documentation":"

Used to update the settings of a channel.

" }, "UpdateDataset":{ "name":"UpdateDataset", @@ -559,7 +559,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the settings of a data set.

" + "documentation":"

Updates the settings of a dataset.

" }, "UpdateDatastore":{ "name":"UpdateDatastore", @@ -575,7 +575,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the settings of a data store.

" + "documentation":"

Used to update the settings of a data store.

" }, "UpdatePipeline":{ "name":"UpdatePipeline", @@ -681,7 +681,7 @@ }, "messages":{ "shape":"Messages", - "documentation":"

The list of messages to be sent. Each message has the format: { \"messageId\": \"string\", \"payload\": \"string\"}.

The field names of message payloads (data) that you send to AWS IoT Analytics:

  • Must contain only alphanumeric characters and undescores (_). No other special characters are allowed.

  • Must begin with an alphabetic character or single underscore (_).

  • Cannot contain hyphens (-).

  • In regular expression terms: \"^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$\".

  • Cannot be more than 255 characters.

  • Are case insensitive. (Fields named foo and FOO in the same payload are considered duplicates.)

For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.

" + "documentation":"

The list of messages to be sent. Each message has the format: { \"messageId\": \"string\", \"payload\": \"string\"}.

The field names of message payloads (data) that you send to IoT Analytics:

  • Must contain only alphanumeric characters and underscores (_). No other special characters are allowed.

  • Must begin with an alphabetic character or single underscore (_).

  • Cannot contain hyphens (-).

  • In regular expression terms: \"^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$\".

  • Cannot be more than 255 characters.

  • Are case insensitive. (Fields named foo and FOO in the same payload are considered duplicates.)

For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.

" } } }, @@ -741,7 +741,7 @@ }, "storage":{ "shape":"ChannelStorage", - "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

" + "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You can't change this storage option after the channel is created.

" }, "arn":{ "shape":"ChannelArn", @@ -765,7 +765,7 @@ }, "lastMessageArrivalTime":{ "shape":"Timestamp", - "documentation":"

The last time when a new message arrived in the channel.

AWS IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" + "documentation":"

The last time when a new message arrived in the channel.

IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" } }, "documentation":"

A collection of data from an MQTT topic. Channels archive the raw, unprocessed messages before publishing the data to a pipeline.

" @@ -807,7 +807,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9_]+$" + "pattern":"(^(?!_{2}))(^[a-zA-Z0-9_]+$)" }, "ChannelStatistics":{ "type":"structure", @@ -832,21 +832,21 @@ "members":{ "serviceManagedS3":{ "shape":"ServiceManagedChannelS3Storage", - "documentation":"

Use this to store channel data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" + "documentation":"

Used to store channel data in an S3 bucket managed by IoT Analytics. You can't change the choice of S3 storage after the data store is created.

" }, "customerManagedS3":{ "shape":"CustomerManagedChannelS3Storage", - "documentation":"

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" + "documentation":"

Used to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the retentionPeriod parameter is ignored. You can't change the choice of S3 storage after the data store is created.

" } }, - "documentation":"

Where channel data is stored. You may choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. This cannot be changed after creation of the channel.

" + "documentation":"

Where channel data is stored. You may choose one of serviceManagedS3, customerManagedS3 storage. If not specified, the default is serviceManagedS3. This can't be changed after creation of the channel.

" }, "ChannelStorageSummary":{ "type":"structure", "members":{ "serviceManagedS3":{ "shape":"ServiceManagedChannelS3StorageSummary", - "documentation":"

Used to store channel data in an S3 bucket managed by AWS IoT Analytics.

" + "documentation":"

Used to store channel data in an S3 bucket managed by IoT Analytics.

" }, "customerManagedS3":{ "shape":"CustomerManagedChannelS3StorageSummary", @@ -884,7 +884,7 @@ }, "lastMessageArrivalTime":{ "shape":"Timestamp", - "documentation":"

The last time when a new message arrived in the channel.

AWS IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" + "documentation":"

The last time when a new message arrived in the channel.

IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" } }, "documentation":"

A summary of information about a channel.

" @@ -902,7 +902,7 @@ }, "type":{ "shape":"ColumnDataType", - "documentation":"

The type of data. For more information about the supported data types, see Common data types in the AWS Glue Developer Guide.

" + "documentation":"

The type of data. For more information about the supported data types, see Common data types in the Glue Developer Guide.

" } }, "documentation":"

Contains information about a column that stores your data.

" @@ -967,7 +967,7 @@ }, "channelStorage":{ "shape":"ChannelStorage", - "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

" + "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You can't change this storage option after the channel is created.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", @@ -1030,15 +1030,15 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set.

" + "documentation":"

The name of the dataset.

" }, "actions":{ "shape":"DatasetActions", - "documentation":"

A list of actions that create the data set contents.

" + "documentation":"

A list of actions that create the dataset contents.

" }, "triggers":{ "shape":"DatasetTriggers", - "documentation":"

A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" + "documentation":"

A list of triggers. A trigger causes dataset contents to be populated at a specified time interval or when another dataset's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" }, "contentDeliveryRules":{ "shape":"DatasetContentDeliveryRules", @@ -1046,19 +1046,19 @@ }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

Optional. How long, in days, versions of dataset contents are kept for the dataset. If not specified or set to null, versions of dataset contents are retained for at most 90 days. The number of versions of dataset contents retained is determined by the versioningConfiguration parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" + "documentation":"

Optional. How long, in days, versions of dataset contents are kept for the dataset. If not specified or set to null, versions of dataset contents are retained for at most 90 days. The number of versions of dataset contents retained is determined by the versioningConfiguration parameter. For more information, see Keeping Multiple Versions of IoT Analytics datasets in the IoT Analytics User Guide.

" }, "versioningConfiguration":{ "shape":"VersioningConfiguration", - "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" + "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of IoT Analytics datasets in the IoT Analytics User Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

Metadata which can be used to manage the data set.

" + "documentation":"

Metadata which can be used to manage the dataset.

" }, "lateDataRules":{ "shape":"LateDataRules", - "documentation":"

A list of data rules that send notifications to Amazon CloudWatch, when data arrives late. To specify lateDataRules, the dataset must use a DeltaTimer filter.

" + "documentation":"

A list of data rules that send notifications to CloudWatch, when data arrives late. To specify lateDataRules, the dataset must use a DeltaTimer filter.

" } } }, @@ -1089,7 +1089,7 @@ }, "datastoreStorage":{ "shape":"DatastoreStorage", - "documentation":"

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

" + "documentation":"

Where data in a data store is stored. You can choose serviceManagedS3 storage, customerManagedS3 storage, or iotSiteWiseMultiLayerStorage storage. The default is serviceManagedS3. You can't change the choice of Amazon S3 storage after your data store is created.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", @@ -1101,11 +1101,11 @@ }, "fileFormatConfiguration":{ "shape":"FileFormatConfiguration", - "documentation":"

Contains the configuration information of file formats. AWS IoT Analytics data stores support JSON and Parquet.

The default file format is JSON. You can specify only one format.

You can't change the file format after you create the data store.

" + "documentation":"

Contains the configuration information of file formats. IoT Analytics data stores support JSON and Parquet.

The default file format is JSON. You can specify only one format.

You can't change the file format after you create the data store.

" }, "datastorePartitions":{ "shape":"DatastorePartitions", - "documentation":"

Contains information about the partitions in a data store.

" + "documentation":"

Contains information about the partition dimensions in a data store.

" } } }, @@ -1139,7 +1139,7 @@ }, "pipelineActivities":{ "shape":"PipelineActivities", - "documentation":"

A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

" + "documentation":"

A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

" }, "tags":{ "shape":"TagList", @@ -1173,14 +1173,14 @@ }, "keyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

Optional. The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" + "documentation":"

(Optional) The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" + "documentation":"

The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 resources.

" } }, - "documentation":"

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" + "documentation":"

Used to store channel data in an S3 bucket that you manage. If customer-managed storage is selected, the retentionPeriod parameter is ignored. You can't change the choice of S3 storage after the data store is created.

" }, "CustomerManagedChannelS3StorageSummary":{ "type":"structure", @@ -1191,11 +1191,11 @@ }, "keyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

Optional. The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a forward slash (/).

" + "documentation":"

(Optional) The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a forward slash (/).

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" + "documentation":"

The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 resources.

" } }, "documentation":"

Used to store channel data in an S3 bucket that you manage.

" @@ -1209,55 +1209,55 @@ "members":{ "bucket":{ "shape":"BucketName", - "documentation":"

The name of the S3 bucket in which data store data is stored.

" + "documentation":"

The name of the Amazon S3 bucket where your data is stored.

" }, "keyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

Optional. The prefix used to create the keys of the data store data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" + "documentation":"

(Optional) The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" + "documentation":"

The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 resources.

" } }, - "documentation":"

Use this to store data store data in an S3 bucket that you manage. When customer-managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

" + "documentation":"

S3-customer-managed; When you choose customer-managed storage, the retentionPeriod parameter is ignored. You can't change the choice of Amazon S3 storage after your data store is created.

" }, "CustomerManagedDatastoreS3StorageSummary":{ "type":"structure", "members":{ "bucket":{ "shape":"BucketName", - "documentation":"

The name of the S3 bucket in which data store data is stored.

" + "documentation":"

The name of the Amazon S3 bucket where your data is stored.

" }, "keyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

Optional. The prefix used to create the keys of the data store data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" + "documentation":"

(Optional) The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" + "documentation":"

The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 resources.

" } }, - "documentation":"

Used to store data store data in an S3 bucket that you manage.

" + "documentation":"

Contains information about the data store that you manage.

" }, "Dataset":{ "type":"structure", "members":{ "name":{ "shape":"DatasetName", - "documentation":"

The name of the data set.

" + "documentation":"

The name of the dataset.

" }, "arn":{ "shape":"DatasetArn", - "documentation":"

The ARN of the data set.

" + "documentation":"

The ARN of the dataset.

" }, "actions":{ "shape":"DatasetActions", - "documentation":"

The DatasetAction objects that automatically create the data set contents.

" + "documentation":"

The DatasetAction objects that automatically create the dataset contents.

" }, "triggers":{ "shape":"DatasetTriggers", - "documentation":"

The DatasetTrigger objects that specify when the data set is automatically updated.

" + "documentation":"

The DatasetTrigger objects that specify when the dataset is automatically updated.

" }, "contentDeliveryRules":{ "shape":"DatasetContentDeliveryRules", @@ -1265,48 +1265,48 @@ }, "status":{ "shape":"DatasetStatus", - "documentation":"

The status of the data set.

" + "documentation":"

The status of the dataset.

" }, "creationTime":{ "shape":"Timestamp", - "documentation":"

When the data set was created.

" + "documentation":"

When the dataset was created.

" }, "lastUpdateTime":{ "shape":"Timestamp", - "documentation":"

The last time the data set was updated.

" + "documentation":"

The last time the dataset was updated.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

Optional. How long, in days, message data is kept for the data set.

" + "documentation":"

Optional. How long, in days, message data is kept for the dataset.

" }, "versioningConfiguration":{ "shape":"VersioningConfiguration", - "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" + "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of IoT Analytics datasets in the IoT Analytics User Guide.

" }, "lateDataRules":{ "shape":"LateDataRules", - "documentation":"

A list of data rules that send notifications to Amazon CloudWatch, when data arrives late. To specify lateDataRules, the dataset must use a DeltaTimer filter.

" + "documentation":"

A list of data rules that send notifications to CloudWatch, when data arrives late. To specify lateDataRules, the dataset must use a DeltaTimer filter.

" } }, - "documentation":"

Information about a data set.

" + "documentation":"

Information about a dataset.

" }, "DatasetAction":{ "type":"structure", "members":{ "actionName":{ "shape":"DatasetActionName", - "documentation":"

The name of the data set action by which data set contents are automatically created.

" + "documentation":"

The name of the dataset action by which dataset contents are automatically created.

" }, "queryAction":{ "shape":"SqlQueryDatasetAction", - "documentation":"

An SqlQueryDatasetAction object that uses an SQL query to automatically create data set contents.

" + "documentation":"

An SqlQueryDatasetAction object that uses an SQL query to automatically create dataset contents.

" }, "containerAction":{ "shape":"ContainerDatasetAction", "documentation":"

Information that allows the system to run a containerized application to create the dataset contents. The application must be in a Docker container along with any required support libraries.

" } }, - "documentation":"

A DatasetAction object that specifies how data set contents are automatically created.

" + "documentation":"

A DatasetAction object that specifies how dataset contents are automatically created.

" }, "DatasetActionName":{ "type":"string", @@ -1353,7 +1353,7 @@ "members":{ "iotEventsDestinationConfiguration":{ "shape":"IotEventsDestinationConfiguration", - "documentation":"

Configuration information for delivery of dataset contents to AWS IoT Events.

" + "documentation":"

Configuration information for delivery of dataset contents to IoT Events.

" }, "s3DestinationConfiguration":{ "shape":"S3DestinationConfiguration", @@ -1396,14 +1396,14 @@ "members":{ "state":{ "shape":"DatasetContentState", - "documentation":"

The state of the data set contents. Can be one of READY, CREATING, SUCCEEDED, or FAILED.

" + "documentation":"

The state of the dataset contents. Can be one of READY, CREATING, SUCCEEDED, or FAILED.

" }, "reason":{ "shape":"Reason", - "documentation":"

The reason the data set contents are in this state.

" + "documentation":"

The reason the dataset contents are in this state.

" } }, - "documentation":"

The state of the data set contents and the reason they are in this state.

" + "documentation":"

The state of the dataset contents and the reason they are in this state.

" }, "DatasetContentSummaries":{ "type":"list", @@ -1418,7 +1418,7 @@ }, "status":{ "shape":"DatasetContentStatus", - "documentation":"

The status of the data set contents.

" + "documentation":"

The status of the dataset contents.

" }, "creationTime":{ "shape":"Timestamp", @@ -1460,20 +1460,20 @@ "members":{ "entryName":{ "shape":"EntryName", - "documentation":"

The name of the data set item.

" + "documentation":"

The name of the dataset item.

" }, "dataURI":{ "shape":"PresignedURI", - "documentation":"

The presigned URI of the data set item.

" + "documentation":"

The presigned URI of the dataset item.

" } }, - "documentation":"

The reference to a data set entry.

" + "documentation":"

The reference to a dataset entry.

" }, "DatasetName":{ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9_]+$" + "pattern":"(^(?!_{2}))(^[a-zA-Z0-9_]+$)" }, "DatasetStatus":{ "type":"string", @@ -1492,30 +1492,30 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set.

" + "documentation":"

The name of the dataset.

" }, "status":{ "shape":"DatasetStatus", - "documentation":"

The status of the data set.

" + "documentation":"

The status of the dataset.

" }, "creationTime":{ "shape":"Timestamp", - "documentation":"

The time the data set was created.

" + "documentation":"

The time the dataset was created.

" }, "lastUpdateTime":{ "shape":"Timestamp", - "documentation":"

The last time the data set was updated.

" + "documentation":"

The last time the dataset was updated.

" }, "triggers":{ "shape":"DatasetTriggers", - "documentation":"

A list of triggers. A trigger causes data set content to be populated at a specified time interval or when another data set is populated. The list of triggers can be empty or contain up to five DataSetTrigger objects

" + "documentation":"

A list of triggers. A trigger causes dataset content to be populated at a specified time interval or when another dataset is populated. The list of triggers can be empty or contain up to five DataSetTrigger objects

" }, "actions":{ "shape":"DatasetActionSummaries", "documentation":"

A list of DataActionSummary objects.

" } }, - "documentation":"

A summary of information about a data set.

" + "documentation":"

A summary of information about a dataset.

" }, "DatasetTrigger":{ "type":"structure", @@ -1526,10 +1526,10 @@ }, "dataset":{ "shape":"TriggeringDataset", - "documentation":"

The data set whose content creation triggers the creation of this data set's contents.

" + "documentation":"

The dataset whose content creation triggers the creation of this dataset's contents.

" } }, - "documentation":"

The DatasetTrigger that specifies when the data set is automatically updated.

" + "documentation":"

The DatasetTrigger that specifies when the dataset is automatically updated.

" }, "DatasetTriggers":{ "type":"list", @@ -1546,7 +1546,7 @@ }, "storage":{ "shape":"DatastoreStorage", - "documentation":"

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

" + "documentation":"

Where data in a data store is stored. You can choose serviceManagedS3 storage, customerManagedS3 storage, or iotSiteWiseMultiLayerStorage storage. The default is serviceManagedS3. You can't change the choice of Amazon S3 storage after your data store is created.

" }, "arn":{ "shape":"DatastoreArn", @@ -1570,15 +1570,15 @@ }, "lastMessageArrivalTime":{ "shape":"Timestamp", - "documentation":"

The last time when a new message arrived in the data store.

AWS IoT Analytics updates this value at most once per minute for one data store. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" + "documentation":"

The last time when a new message arrived in the data store.

IoT Analytics updates this value at most once per minute for Amazon Simple Storage Service one data store. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" }, "fileFormatConfiguration":{ "shape":"FileFormatConfiguration", - "documentation":"

Contains the configuration information of file formats. AWS IoT Analytics data stores support JSON and Parquet.

The default file format is JSON. You can specify only one format.

You can't change the file format after you create the data store.

" + "documentation":"

Contains the configuration information of file formats. IoT Analytics data stores support JSON and Parquet.

The default file format is JSON. You can specify only one format.

You can't change the file format after you create the data store.

" }, "datastorePartitions":{ "shape":"DatastorePartitions", - "documentation":"

Contains information about the partitions in a data store.

" + "documentation":"

Contains information about the partition dimensions in a data store.

" } }, "documentation":"

Information about a data store.

" @@ -1602,35 +1602,56 @@ "documentation":"

The datastore activity that specifies where to store the processed data.

" }, "DatastoreArn":{"type":"string"}, + "DatastoreIotSiteWiseMultiLayerStorage":{ + "type":"structure", + "required":["customerManagedS3Storage"], + "members":{ + "customerManagedS3Storage":{ + "shape":"IotSiteWiseCustomerManagedDatastoreS3Storage", + "documentation":"

Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.

" + } + }, + "documentation":"

Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage. You can't change the choice of Amazon S3 storage after your data store is created.

" + }, + "DatastoreIotSiteWiseMultiLayerStorageSummary":{ + "type":"structure", + "members":{ + "customerManagedS3Storage":{ + "shape":"IotSiteWiseCustomerManagedDatastoreS3StorageSummary", + "documentation":"

Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.

" + } + }, + "documentation":"

Contains information about the data store that you manage, which stores data used by IoT SiteWise.

" + }, "DatastoreName":{ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9_]+$" + "pattern":"(^(?!_{2}))(^[a-zA-Z0-9_]+$)" }, "DatastorePartition":{ "type":"structure", "members":{ "attributePartition":{ "shape":"Partition", - "documentation":"

A partition defined by an attributeName.

" + "documentation":"

A partition dimension defined by an attributeName.

" }, "timestampPartition":{ "shape":"TimestampPartition", - "documentation":"

A partition defined by an attributeName and a timestamp format.

" + "documentation":"

A partition dimension defined by a timestamp attribute.

" } }, - "documentation":"

A single partition in a data store.

" + "documentation":"

A single dimension to partition a data store. The dimension must be an AttributePartition or a TimestampPartition.

" }, "DatastorePartitions":{ "type":"structure", "members":{ "partitions":{ "shape":"Partitions", - "documentation":"

A list of partitions in a data store.

" + "documentation":"

A list of partition dimensions in a data store.

" } }, - "documentation":"

Contains information about partitions in a data store.

" + "documentation":"

Contains information about the partition dimensions in a data store.

" }, "DatastoreStatistics":{ "type":"structure", @@ -1655,28 +1676,36 @@ "members":{ "serviceManagedS3":{ "shape":"ServiceManagedDatastoreS3Storage", - "documentation":"

Use this to store data store data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

" + "documentation":"

Used to store data in an Amazon S3 bucket managed by IoT Analytics. You can't change the choice of Amazon S3 storage after your data store is created.

" }, "customerManagedS3":{ "shape":"CustomerManagedDatastoreS3Storage", - "documentation":"

Use this to store data store data in an S3 bucket that you manage. When customer managed storage is selected, the retentionPeriod parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" + "documentation":"

Used to store data in an Amazon S3 bucket that you manage. When you choose customer-managed storage, the retentionPeriod parameter is ignored. You can't change the choice of Amazon S3 storage after your data store is created.

" + }, + "iotSiteWiseMultiLayerStorage":{ + "shape":"DatastoreIotSiteWiseMultiLayerStorage", + "documentation":"

Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage. You can't change the choice of Amazon S3 storage after your data store is created.

" } }, - "documentation":"

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

" + "documentation":"

Where data in a data store is stored. You can choose serviceManagedS3 storage, customerManagedS3 storage, or iotSiteWiseMultiLayerStorage storage. The default is serviceManagedS3. You can't change the choice of Amazon S3 storage after your data store is created.

" }, "DatastoreStorageSummary":{ "type":"structure", "members":{ "serviceManagedS3":{ "shape":"ServiceManagedDatastoreS3StorageSummary", - "documentation":"

Used to store data store data in an S3 bucket managed by AWS IoT Analytics.

" + "documentation":"

Used to store data in an Amazon S3 bucket managed by IoT Analytics.

" }, "customerManagedS3":{ "shape":"CustomerManagedDatastoreS3StorageSummary", - "documentation":"

Used to store data store data in an S3 bucket that you manage.

" + "documentation":"

Used to store data in an Amazon S3 bucket that you manage.

" + }, + "iotSiteWiseMultiLayerStorage":{ + "shape":"DatastoreIotSiteWiseMultiLayerStorageSummary", + "documentation":"

Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.

" } }, - "documentation":"

Where data store data is stored.

" + "documentation":"

Contains information about your data store.

" }, "DatastoreSummaries":{ "type":"list", @@ -1691,7 +1720,7 @@ }, "datastoreStorage":{ "shape":"DatastoreStorageSummary", - "documentation":"

Where data store data is stored.

" + "documentation":"

Where data in a data store is stored.

" }, "status":{ "shape":"DatastoreStatus", @@ -1707,7 +1736,7 @@ }, "lastMessageArrivalTime":{ "shape":"Timestamp", - "documentation":"

The last time when a new message arrived in the data store.

AWS IoT Analytics updates this value at most once per minute for one data store. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" + "documentation":"

The last time when a new message arrived in the data store.

IoT Analytics updates this value at most once per minute for one data store. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" }, "fileFormatType":{ "shape":"FileFormatType", @@ -1715,7 +1744,7 @@ }, "datastorePartitions":{ "shape":"DatastorePartitions", - "documentation":"

Contains information about the partitions in a data store.

" + "documentation":"

Contains information about the partition dimensions in a data store.

" } }, "documentation":"

A summary of information about a data store.

" @@ -1756,7 +1785,7 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set to delete.

", + "documentation":"

The name of the dataset to delete.

", "location":"uri", "locationName":"datasetName" } @@ -1810,10 +1839,10 @@ "members":{ "timeoutInMinutes":{ "shape":"SessionTimeoutInMinutes", - "documentation":"

A time interval. You can use timeoutInMinutes so that AWS IoT Analytics can batch up late data notifications that have been generated since the last execution. AWS IoT Analytics sends one batch of notifications to Amazon CloudWatch Events at one time.

For more information about how to write a timestamp expression, see Date and Time Functions and Operators, in the Presto 0.172 Documentation.

" + "documentation":"

A time interval. You can use timeoutInMinutes so that IoT Analytics can batch up late data notifications that have been generated since the last execution. IoT Analytics sends one batch of notifications to Amazon CloudWatch Events at one time.

For more information about how to write a timestamp expression, see Date and Time Functions and Operators, in the Presto 0.172 Documentation.

" } }, - "documentation":"

A structure that contains the configuration information of a delta time session window.

DeltaTime specifies a time interval. You can use DeltaTime to create dataset contents with data that has arrived in the data store since the last execution. For an example of DeltaTime, see Creating a SQL dataset with a delta window (CLI) in the AWS IoT Analytics User Guide.

" + "documentation":"

A structure that contains the configuration information of a delta time session window.

DeltaTime specifies a time interval. You can use DeltaTime to create dataset contents with data that has arrived in the data store since the last execution. For an example of DeltaTime, see Creating a SQL dataset with a delta window (CLI) in the IoT Analytics User Guide.

" }, "DescribeChannelRequest":{ "type":"structure", @@ -1827,7 +1856,7 @@ }, "includeStatistics":{ "shape":"IncludeStatisticsFlag", - "documentation":"

If true, additional statistical information about the channel is included in the response. This feature cannot be used with a channel whose S3 storage is customer-managed.

", + "documentation":"

If true, additional statistical information about the channel is included in the response. This feature can't be used with a channel whose S3 storage is customer-managed.

", "location":"querystring", "locationName":"includeStatistics" } @@ -1852,7 +1881,7 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set whose information is retrieved.

", + "documentation":"

The name of the dataset whose information is retrieved.

", "location":"uri", "locationName":"datasetName" } @@ -1863,7 +1892,7 @@ "members":{ "dataset":{ "shape":"Dataset", - "documentation":"

An object that contains information about the data set.

" + "documentation":"

An object that contains information about the dataset.

" } } }, @@ -1879,7 +1908,7 @@ }, "includeStatistics":{ "shape":"IncludeStatisticsFlag", - "documentation":"

If true, additional statistical information about the data store is included in the response. This feature cannot be used with a data store whose S3 storage is customer-managed.

", + "documentation":"

If true, additional statistical information about the data store is included in the response. This feature can't be used with a data store whose S3 storage is customer-managed.

", "location":"querystring", "locationName":"includeStatistics" } @@ -1908,7 +1937,7 @@ "members":{ "loggingOptions":{ "shape":"LoggingOptions", - "documentation":"

The current settings of the AWS IoT Analytics logging options.

" + "documentation":"

The current settings of the IoT Analytics logging options.

" } } }, @@ -1963,7 +1992,7 @@ "documentation":"

The next activity in the pipeline.

" } }, - "documentation":"

An activity that adds data from the AWS IoT device registry to your message.

" + "documentation":"

An activity that adds data from the IoT device registry to your message.

" }, "DeviceShadowEnrichActivity":{ "type":"structure", @@ -1995,7 +2024,7 @@ "documentation":"

The next activity in the pipeline.

" } }, - "documentation":"

An activity that adds information from the AWS IoT Device Shadow service to a message.

" + "documentation":"

An activity that adds information from the IoT Device Shadow service to a message.

" }, "DoubleValue":{"type":"double"}, "EndTime":{"type":"timestamp"}, @@ -2028,7 +2057,7 @@ "documentation":"

Contains the configuration information of the Parquet format.

" } }, - "documentation":"

Contains the configuration information of file formats. AWS IoT Analytics data stores support JSON and Parquet.

The default file format is JSON. You can specify only one format.

You can't change the file format after you create the data store.

" + "documentation":"

Contains the configuration information of file formats. IoT Analytics data stores support JSON and Parquet.

The default file format is JSON. You can specify only one format.

You can't change the file format after you create the data store.

" }, "FileFormatType":{ "type":"string", @@ -2070,13 +2099,13 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set whose contents are retrieved.

", + "documentation":"

The name of the dataset whose contents are retrieved.

", "location":"uri", "locationName":"datasetName" }, "versionId":{ "shape":"DatasetContentVersion", - "documentation":"

The version of the data set whose contents are retrieved. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to retrieve the contents of the latest or latest successfully completed data set. If not specified, \"$LATEST_SUCCEEDED\" is the default.

", + "documentation":"

The version of the dataset whose contents are retrieved. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to retrieve the contents of the latest or latest successfully completed dataset. If not specified, \"$LATEST_SUCCEEDED\" is the default.

", "location":"querystring", "locationName":"versionId" } @@ -2095,7 +2124,7 @@ }, "status":{ "shape":"DatasetContentStatus", - "documentation":"

The status of the data set content.

" + "documentation":"

The status of the dataset content.

" } } }, @@ -2108,14 +2137,14 @@ "members":{ "tableName":{ "shape":"GlueTableName", - "documentation":"

The name of the table in your AWS Glue Data Catalog that is used to perform the ETL operations. An AWS Glue Data Catalog table contains partitioned data and descriptions of data sources and targets.

" + "documentation":"

The name of the table in your Glue Data Catalog that is used to perform the ETL operations. A Glue Data Catalog table contains partitioned data and descriptions of data sources and targets.

" }, "databaseName":{ "shape":"GlueDatabaseName", - "documentation":"

The name of the database in your AWS Glue Data Catalog in which the table is located. An AWS Glue Data Catalog database contains metadata tables.

" + "documentation":"

The name of the database in your Glue Data Catalog in which the table is located. A Glue Data Catalog database contains metadata tables.

" } }, - "documentation":"

Configuration information for coordination with AWS Glue, a fully managed extract, transform and load (ETL) service.

" + "documentation":"

Configuration information for coordination with Glue, a fully managed extract, transform and load (ETL) service.

" }, "GlueDatabaseName":{ "type":"string", @@ -2162,14 +2191,14 @@ "members":{ "inputName":{ "shape":"IotEventsInputName", - "documentation":"

The name of the AWS IoT Events input to which dataset contents are delivered.

" + "documentation":"

The name of the IoT Events input to which dataset contents are delivered.

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to deliver dataset contents to an AWS IoT Events input.

" + "documentation":"

The ARN of the role that grants IoT Analytics permission to deliver dataset contents to an IoT Events input.

" } }, - "documentation":"

Configuration information for delivery of dataset contents to AWS IoT Events.

" + "documentation":"

Configuration information for delivery of dataset contents to IoT Events.

" }, "IotEventsInputName":{ "type":"string", @@ -2177,6 +2206,35 @@ "min":1, "pattern":"^[a-zA-Z][a-zA-Z0-9_]*$" }, + "IotSiteWiseCustomerManagedDatastoreS3Storage":{ + "type":"structure", + "required":["bucket"], + "members":{ + "bucket":{ + "shape":"BucketName", + "documentation":"

The name of the Amazon S3 bucket where your data is stored.

" + }, + "keyPrefix":{ + "shape":"S3KeyPrefix", + "documentation":"

(Optional) The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" + } + }, + "documentation":"

Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage. You can't change the choice of Amazon S3 storage after your data store is created.

" + }, + "IotSiteWiseCustomerManagedDatastoreS3StorageSummary":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"BucketName", + "documentation":"

The name of the Amazon S3 bucket where your data is stored.

" + }, + "keyPrefix":{ + "shape":"S3KeyPrefix", + "documentation":"

(Optional) The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" + } + }, + "documentation":"

Contains information about the data store that you manage, which stores data used by IoT SiteWise.

" + }, "JsonConfiguration":{ "type":"structure", "members":{ @@ -2298,7 +2356,7 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set whose contents information you want to list.

", + "documentation":"

The name of the dataset whose contents information you want to list.

", "location":"uri", "locationName":"datasetName" }, @@ -2316,13 +2374,13 @@ }, "scheduledOnOrAfter":{ "shape":"Timestamp", - "documentation":"

A filter to limit results to those data set contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", + "documentation":"

A filter to limit results to those dataset contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", "location":"querystring", "locationName":"scheduledOnOrAfter" }, "scheduledBefore":{ "shape":"Timestamp", - "documentation":"

A filter to limit results to those data set contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", + "documentation":"

A filter to limit results to those dataset contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", "location":"querystring", "locationName":"scheduledBefore" } @@ -2333,7 +2391,7 @@ "members":{ "datasetContentSummaries":{ "shape":"DatasetContentSummaries", - "documentation":"

Summary information about data set contents that have been created.

" + "documentation":"

Summary information about dataset contents that have been created.

" }, "nextToken":{ "shape":"NextToken", @@ -2468,7 +2526,7 @@ "members":{ "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role that grants permission to AWS IoT Analytics to perform logging.

" + "documentation":"

The ARN of the role that grants permission to IoT Analytics to perform logging.

" }, "level":{ "shape":"LoggingLevel", @@ -2476,7 +2534,7 @@ }, "enabled":{ "shape":"LoggingEnabled", - "documentation":"

If true, logging is enabled for AWS IoT Analytics.

" + "documentation":"

If true, logging is enabled for IoT Analytics.

" } }, "documentation":"

Information about logging options.

" @@ -2596,10 +2654,10 @@ "members":{ "attributeName":{ "shape":"PartitionAttributeName", - "documentation":"

The attribute name of the partition.

" + "documentation":"

The name of the attribute that defines a partition dimension.

" } }, - "documentation":"

A single partition.

" + "documentation":"

A partition dimension defined by an attribute.

" }, "PartitionAttributeName":{ "type":"string", @@ -2674,7 +2732,7 @@ }, "selectAttributes":{ "shape":"SelectAttributesActivity", - "documentation":"

Creates a new message using only the specified attributes from the original message.

" + "documentation":"

Used to create a new message using only the specified attributes from the original message.

" }, "filter":{ "shape":"FilterActivity", @@ -2686,11 +2744,11 @@ }, "deviceRegistryEnrich":{ "shape":"DeviceRegistryEnrichActivity", - "documentation":"

Adds data from the AWS IoT device registry to your message.

" + "documentation":"

Adds data from the IoT device registry to your message.

" }, "deviceShadowEnrich":{ "shape":"DeviceShadowEnrichActivity", - "documentation":"

Adds information from the AWS IoT Device Shadow service to a message.

" + "documentation":"

Adds information from the IoT Device Shadow service to a message.

" } }, "documentation":"

An activity that performs a transformation on a message.

" @@ -2700,7 +2758,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9_]+$" + "pattern":"(^(?!_{2}))(^[a-zA-Z0-9_]+$)" }, "PipelineSummaries":{ "type":"list", @@ -2735,7 +2793,7 @@ "members":{ "loggingOptions":{ "shape":"LoggingOptions", - "documentation":"

The new values of the AWS IoT Analytics logging options.

" + "documentation":"

The new values of the IoT Analytics logging options.

" } } }, @@ -2891,7 +2949,7 @@ "members":{ "pipelineActivity":{ "shape":"PipelineActivity", - "documentation":"

The pipeline activity that is run. This must not be a channel activity or a datastore activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. If a lambda activity is specified, only short-running Lambda functions (those with a timeout of less than 30 seconds or less) can be used.

" + "documentation":"

The pipeline activity that is run. This must not be a channel activity or a data store activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. If a Lambda activity is specified, only short-running Lambda functions (those with a timeout of 30 seconds or less) can be used.

" }, "payloads":{ "shape":"MessagePayloads", @@ -2930,11 +2988,11 @@ }, "glueConfiguration":{ "shape":"GlueConfiguration", - "documentation":"

Configuration information for coordination with AWS Glue, a fully managed extract, transform and load (ETL) service.

" + "documentation":"

Configuration information for coordination with Glue, a fully managed extract, transform and load (ETL) service.

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 and AWS Glue resources.

" + "documentation":"

The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 and Glue resources.

" } }, "documentation":"

Configuration information for delivery of dataset contents to Amazon Simple Storage Service (Amazon S3).

" @@ -3037,31 +3095,31 @@ "documentation":"

The next activity in the pipeline.

" } }, - "documentation":"

Creates a new message using only the specified attributes from the original message.

" + "documentation":"

Used to create a new message using only the specified attributes from the original message.

" }, "ServiceManagedChannelS3Storage":{ "type":"structure", "members":{ }, - "documentation":"

Use this to store channel data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" + "documentation":"

Used to store channel data in an S3 bucket managed by IoT Analytics. You can't change the choice of S3 storage after the channel is created.

" }, "ServiceManagedChannelS3StorageSummary":{ "type":"structure", "members":{ }, - "documentation":"

Used to store channel data in an S3 bucket managed by AWS IoT Analytics.

" + "documentation":"

Used to store channel data in an S3 bucket managed by IoT Analytics.

" }, "ServiceManagedDatastoreS3Storage":{ "type":"structure", "members":{ }, - "documentation":"

Use this to store data store data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

" + "documentation":"

Used to store data in an Amazon S3 bucket managed by IoT Analytics. You can't change the choice of Amazon S3 storage after your data store is created.

" }, "ServiceManagedDatastoreS3StorageSummary":{ "type":"structure", "members":{ }, - "documentation":"

Used to store data store data in an S3 bucket managed by AWS IoT Analytics.

" + "documentation":"

Contains information about the data store that is managed by IoT Analytics.

" }, "ServiceUnavailableException":{ "type":"structure", @@ -3225,10 +3283,10 @@ }, "timestampFormat":{ "shape":"TimestampFormat", - "documentation":"

The timestamp format of a partition defined by a timestamp.

" + "documentation":"

The timestamp format of a partition defined by a timestamp. The default format is seconds since epoch (January 1, 1970 at midnight UTC time).

" } }, - "documentation":"

A partition defined by a timestamp.

" + "documentation":"

A partition dimension defined by a timestamp attribute.

" }, "TriggeringDataset":{ "type":"structure", @@ -3281,11 +3339,11 @@ }, "channelStorage":{ "shape":"ChannelStorage", - "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

" + "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You can't change this storage option after the channel is created.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

How long, in days, message data is kept for the channel. The retention period cannot be updated if the channel's S3 storage is customer-managed.

" + "documentation":"

How long, in days, message data is kept for the channel. The retention period can't be updated if the channel's Amazon S3 storage is customer-managed.

" } } }, @@ -3298,7 +3356,7 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set to update.

", + "documentation":"

The name of the dataset to update.

", "location":"uri", "locationName":"datasetName" }, @@ -3320,11 +3378,11 @@ }, "versioningConfiguration":{ "shape":"VersioningConfiguration", - "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" + "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of IoT Analytics datasets in the IoT Analytics User Guide.

" }, "lateDataRules":{ "shape":"LateDataRules", - "documentation":"

A list of data rules that send notifications to Amazon CloudWatch, when data arrives late. To specify lateDataRules, the dataset must use a DeltaTimer filter.

" + "documentation":"

A list of data rules that send notifications to CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.

" } } }, @@ -3340,15 +3398,15 @@ }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

How long, in days, message data is kept for the data store. The retention period cannot be updated if the data store's S3 storage is customer-managed.

" + "documentation":"

How long, in days, message data is kept for the data store. The retention period can't be updated if the data store's Amazon S3 storage is customer-managed.

" }, "datastoreStorage":{ "shape":"DatastoreStorage", - "documentation":"

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default isserviceManagedS3. You cannot change this storage option after the data store is created.

" + "documentation":"

Where data in a data store is stored. You can choose serviceManagedS3 storage, customerManagedS3 storage, or iotSiteWiseMultiLayerStorage storage. The default is serviceManagedS3. You can't change the choice of Amazon S3 storage after your data store is created.

" }, "fileFormatConfiguration":{ "shape":"FileFormatConfiguration", - "documentation":"

Contains the configuration information of file formats. AWS IoT Analytics data stores support JSON and Parquet.

The default file format is JSON. You can specify only one format.

You can't change the file format after you create the data store.

" + "documentation":"

Contains the configuration information of file formats. IoT Analytics data stores support JSON and Parquet.

The default file format is JSON. You can specify only one format.

You can't change the file format after you create the data store.

" } } }, @@ -3433,5 +3491,5 @@ "resourceArn":{"type":"string"}, "resourceId":{"type":"string"} }, - "documentation":"

AWS IoT Analytics allows you to collect large amounts of device data, process messages, and store them. You can then query the data and run sophisticated analytics on it. AWS IoT Analytics enables advanced data exploration through integration with Jupyter Notebooks and data visualization through integration with Amazon QuickSight.

Traditional analytics and business intelligence tools are designed to process structured data. IoT data often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result the data from these devices can have significant gaps, corrupted messages, and false readings that must be cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data from external sources.

AWS IoT Analytics automates the steps required to analyze data from IoT devices. AWS IoT Analytics filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You can set up the service to collect only the data you need from your devices, apply mathematical transforms to process the data, and enrich the data with device-specific metadata such as device type and location before storing it. Then, you can analyze your data by running queries using the built-in SQL query engine, or perform more complex analytics and machine learning inference. AWS IoT Analytics includes pre-built models for common IoT use cases so you can answer questions like which devices are about to fail or which customers are at risk of abandoning their wearable devices.

" + "documentation":"

IoT Analytics allows you to collect large amounts of device data, process messages, and store them. You can then query the data and run sophisticated analytics on it. IoT Analytics enables advanced data exploration through integration with Jupyter Notebooks and data visualization through integration with Amazon QuickSight.

Traditional analytics and business intelligence tools are designed to process structured data. IoT data often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result the data from these devices can have significant gaps, corrupted messages, and false readings that must be cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data from external sources.

IoT Analytics automates the steps required to analyze data from IoT devices. IoT Analytics filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You can set up the service to collect only the data you need from your devices, apply mathematical transforms to process the data, and enrich the data with device-specific metadata such as device type and location before storing it. Then, you can analyze your data by running queries using the built-in SQL query engine, or perform more complex analytics and machine learning inference. IoT Analytics includes pre-built models for common IoT use cases so you can answer questions like which devices are about to fail or which customers are at risk of abandoning their wearable devices.

" } diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 6c7ce56d35bc..2a6558cba060 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index 6183c664dcc3..b8d5147ae005 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index ebea21ffacc5..2b697607df9b 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index e8990b27a5b6..289285a78525 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index af8540b8d6b8..51d6e6f54336 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index 1a2f3bb3f22f..a2c34ad55a63 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotjobsdataplane AWS Java SDK :: 
Services :: IoT Jobs Data Plane diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index 6b11014be94b..7314ef1223ef 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 166dabeee374..d1c617816c42 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotsitewise/src/main/resources/codegen-resources/service-2.json b/services/iotsitewise/src/main/resources/codegen-resources/service-2.json index 6ca96971b09c..0076ba1d64bf 100644 --- a/services/iotsitewise/src/main/resources/codegen-resources/service-2.json +++ b/services/iotsitewise/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Associates a child asset with the given parent asset through a hierarchy defined in the parent asset's model. For more information, see Associating assets in the IoT SiteWise User Guide.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "BatchAssociateProjectAssets":{ "name":"BatchAssociateProjectAssets", @@ -125,7 +125,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Creates an asset from an existing asset model. For more information, see Creating assets in the IoT SiteWise User Guide.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "CreateAssetModel":{ "name":"CreateAssetModel", @@ -146,7 +146,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Creates an asset model from specified property and hierarchy definitions. You create assets from asset models. With asset models, you can easily create assets of the same type that have standardized definitions. Each asset created from a model inherits the asset model's property and hierarchy definitions. For more information, see Defining asset models in the IoT SiteWise User Guide.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "CreateDashboard":{ "name":"CreateDashboard", @@ -184,7 +184,7 @@ {"shape":"LimitExceededException"} ], "documentation":"

Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to IoT SiteWise. For more information, see Ingesting data using a gateway in the IoT SiteWise User Guide.

", - "endpoint":{"hostPrefix":"edge."} + "endpoint":{"hostPrefix":"api."} }, "CreatePortal":{ "name":"CreatePortal", @@ -259,7 +259,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Deletes an asset. This action can't be undone. For more information, see Deleting assets and models in the IoT SiteWise User Guide.

You can't delete an asset that's associated to another asset. For more information, see DisassociateAssets.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "DeleteAssetModel":{ "name":"DeleteAssetModel", @@ -278,7 +278,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Deletes an asset model. This action can't be undone. You must delete all assets created from an asset model before you can delete the model. Also, you can't delete an asset model if a parent asset model exists that contains a property formula expression that depends on the asset model that you want to delete. For more information, see Deleting assets and models in the IoT SiteWise User Guide.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "DeleteDashboard":{ "name":"DeleteDashboard", @@ -312,7 +312,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Deletes a gateway from IoT SiteWise. When you delete a gateway, some of the gateway's files remain in your gateway's file system.

", - "endpoint":{"hostPrefix":"edge."} + "endpoint":{"hostPrefix":"api."} }, "DeletePortal":{ "name":"DeletePortal", @@ -384,7 +384,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves information about an asset.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "DescribeAssetModel":{ "name":"DescribeAssetModel", @@ -401,7 +401,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves information about an asset model.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "DescribeAssetProperty":{ "name":"DescribeAssetProperty", @@ -418,7 +418,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves information about an asset property.

When you call this operation for an attribute property, this response includes the default attribute value that you define in the asset model. If you update the default value in the model, this operation's response includes the new default value.

This operation doesn't return the value of the asset property. To get the value of an asset property, use GetAssetPropertyValue.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "DescribeDashboard":{ "name":"DescribeDashboard", @@ -451,7 +451,8 @@ {"shape":"InternalFailureException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves information about the default encryption configuration for the Amazon Web Services account in the default or specified Region. For more information, see Key management in the IoT SiteWise User Guide.

" + "documentation":"

Retrieves information about the default encryption configuration for the Amazon Web Services account in the default or specified Region. For more information, see Key management in the IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"api."} }, "DescribeGateway":{ "name":"DescribeGateway", @@ -468,7 +469,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves information about a gateway.

", - "endpoint":{"hostPrefix":"edge."} + "endpoint":{"hostPrefix":"api."} }, "DescribeGatewayCapabilityConfiguration":{ "name":"DescribeGatewayCapabilityConfiguration", @@ -485,7 +486,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves information about a gateway capability configuration. Each gateway capability defines data sources for a gateway. A capability configuration can contain multiple data source configurations. If you define OPC-UA sources for a gateway in the IoT SiteWise console, all of your OPC-UA sources are stored in one capability configuration. To list all capability configurations for a gateway, use DescribeGateway.

", - "endpoint":{"hostPrefix":"edge."} + "endpoint":{"hostPrefix":"api."} }, "DescribeLoggingOptions":{ "name":"DescribeLoggingOptions", @@ -502,7 +503,7 @@ {"shape":"ResourceNotFoundException"} ], "documentation":"

Retrieves the current IoT SiteWise logging options.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "DescribePortal":{ "name":"DescribePortal", @@ -556,7 +557,8 @@ {"shape":"LimitExceededException"}, {"shape":"ConflictingOperationException"} ], - "documentation":"

Retrieves information about the storage configuration for IoT SiteWise.

" + "documentation":"

Retrieves information about the storage configuration for IoT SiteWise.

", + "endpoint":{"hostPrefix":"api."} }, "DisassociateAssets":{ "name":"DisassociateAssets", @@ -573,7 +575,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Disassociates a child asset from the given parent asset through a hierarchy defined in the parent asset's model.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "GetAssetPropertyAggregates":{ "name":"GetAssetPropertyAggregates", @@ -678,7 +680,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves a paginated list of summaries of all asset models.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "ListAssetRelationships":{ "name":"ListAssetRelationships", @@ -695,7 +697,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves a paginated list of asset relationships for an asset. You can use this operation to identify an asset's root asset and all associated assets between that asset and its root.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "ListAssets":{ "name":"ListAssets", @@ -712,7 +714,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves a paginated list of asset summaries.

You can use this operation to do the following:

  • List assets based on a specific asset model.

  • List top-level assets.

You can't use this operation to list all assets. To retrieve summaries for all of your assets, use ListAssetModels to get all of your asset model IDs. Then, use ListAssets to get all assets for each asset model.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "ListAssociatedAssets":{ "name":"ListAssociatedAssets", @@ -729,7 +731,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves a paginated list of associated assets.

You can use this operation to do the following:

  • List child assets associated to a parent asset by a hierarchy that you specify.

  • List an asset's parent asset.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "ListDashboards":{ "name":"ListDashboards", @@ -762,7 +764,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Retrieves a paginated list of gateways.

", - "endpoint":{"hostPrefix":"edge."} + "endpoint":{"hostPrefix":"api."} }, "ListPortals":{ "name":"ListPortals", @@ -832,7 +834,8 @@ {"shape":"LimitExceededException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the list of tags for an IoT SiteWise resource.

" + "documentation":"

Retrieves the list of tags for an IoT SiteWise resource.

", + "endpoint":{"hostPrefix":"api."} }, "PutDefaultEncryptionConfiguration":{ "name":"PutDefaultEncryptionConfiguration", @@ -849,7 +852,8 @@ {"shape":"LimitExceededException"}, {"shape":"ConflictingOperationException"} ], - "documentation":"

Sets the default encryption configuration for the Amazon Web Services account. For more information, see Key management in the IoT SiteWise User Guide.

" + "documentation":"

Sets the default encryption configuration for the Amazon Web Services account. For more information, see Key management in the IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"api."} }, "PutLoggingOptions":{ "name":"PutLoggingOptions", @@ -867,7 +871,7 @@ {"shape":"ResourceNotFoundException"} ], "documentation":"

Sets logging options for IoT SiteWise.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "PutStorageConfiguration":{ "name":"PutStorageConfiguration", @@ -886,7 +890,8 @@ {"shape":"LimitExceededException"}, {"shape":"ConflictingOperationException"} ], - "documentation":"

Configures storage settings for IoT SiteWise.

" + "documentation":"

Configures storage settings for IoT SiteWise.

", + "endpoint":{"hostPrefix":"api."} }, "TagResource":{ "name":"TagResource", @@ -906,7 +911,8 @@ {"shape":"UnauthorizedException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Adds tags to an IoT SiteWise resource. If a tag already exists for the resource, this operation updates the tag's value.

" + "documentation":"

Adds tags to an IoT SiteWise resource. If a tag already exists for the resource, this operation updates the tag's value.

", + "endpoint":{"hostPrefix":"api."} }, "UntagResource":{ "name":"UntagResource", @@ -925,7 +931,8 @@ {"shape":"LimitExceededException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Removes a tag from an IoT SiteWise resource.

" + "documentation":"

Removes a tag from an IoT SiteWise resource.

", + "endpoint":{"hostPrefix":"api."} }, "UpdateAccessPolicy":{ "name":"UpdateAccessPolicy", @@ -963,7 +970,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Updates an asset's name. For more information, see Updating assets and models in the IoT SiteWise User Guide.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "UpdateAssetModel":{ "name":"UpdateAssetModel", @@ -984,7 +991,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User Guide.

This operation overwrites the existing model with the provided model. To avoid deleting your asset model's properties or hierarchies, you must include their IDs and definitions in the updated asset model payload. For more information, see DescribeAssetModel.

If you remove a property from an asset model, IoT SiteWise deletes all previous data for that property. If you remove a hierarchy definition from an asset model, IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the type or data type of an existing property.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "UpdateAssetProperty":{ "name":"UpdateAssetProperty", @@ -1001,7 +1008,7 @@ {"shape":"ConflictingOperationException"} ], "documentation":"

Updates an asset property's alias and notification state.

This operation overwrites the property's existing alias and notification state. To keep your existing property's alias or notification state, you must include the existing values in the UpdateAssetProperty request. For more information, see DescribeAssetProperty.

", - "endpoint":{"hostPrefix":"model."} + "endpoint":{"hostPrefix":"api."} }, "UpdateDashboard":{ "name":"UpdateDashboard", @@ -1036,7 +1043,7 @@ {"shape":"ThrottlingException"} ], "documentation":"

Updates a gateway's name.

", - "endpoint":{"hostPrefix":"edge."} + "endpoint":{"hostPrefix":"api."} }, "UpdateGatewayCapabilityConfiguration":{ "name":"UpdateGatewayCapabilityConfiguration", @@ -1056,7 +1063,7 @@ {"shape":"LimitExceededException"} ], "documentation":"

Updates a gateway capability configuration or defines a new capability configuration. Each gateway capability defines data sources for a gateway. A capability configuration can contain multiple data source configurations. If you define OPC-UA sources for a gateway in the IoT SiteWise console, all of your OPC-UA sources are stored in one capability configuration. To list all capability configurations for a gateway, use DescribeGateway.

", - "endpoint":{"hostPrefix":"edge."} + "endpoint":{"hostPrefix":"api."} }, "UpdatePortal":{ "name":"UpdatePortal", @@ -2037,7 +2044,8 @@ "enum":[ "IN_SYNC", "OUT_OF_SYNC", - "SYNC_FAILED" + "SYNC_FAILED", + "UNKNOWN" ] }, "ClientToken":{ @@ -2066,6 +2074,13 @@ }, "documentation":"

Contains information about a composite model property on an asset.

" }, + "ComputeLocation":{ + "type":"string", + "enum":[ + "EDGE", + "CLOUD" + ] + }, "ConfigurationErrorDetails":{ "type":"structure", "required":[ @@ -2129,6 +2144,11 @@ "error":{"httpStatusCode":409}, "exception":true }, + "CoreDeviceThingName":{ + "type":"string", + "max":128, + "min":1 + }, "CreateAccessPolicyRequest":{ "type":"structure", "required":[ @@ -3352,6 +3372,36 @@ "min":1, "pattern":"[^\\u0000-\\u001F\\u007F]+" }, + "DetailedError":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{ + "shape":"DetailedErrorCode", + "documentation":"

The error code.

" + }, + "message":{ + "shape":"DetailedErrorMessage", + "documentation":"

The error message.

" + } + }, + "documentation":"

Contains detailed error information.

" + }, + "DetailedErrorCode":{ + "type":"string", + "enum":[ + "INCOMPATIBLE_COMPUTE_LOCATION", + "INCOMPATIBLE_FORWARDING_CONFIGURATION" + ] + }, + "DetailedErrorMessage":{"type":"string"}, + "DetailedErrors":{ + "type":"list", + "member":{"shape":"DetailedError"} + }, "DisassociateAssetsRequest":{ "type":"structure", "required":[ @@ -3421,6 +3471,10 @@ "message":{ "shape":"ErrorMessage", "documentation":"

The error message.

" + }, + "details":{ + "shape":"DetailedErrors", + "documentation":"

A list of detailed errors.

" } }, "documentation":"

Contains the details of an IoT SiteWise error.

" @@ -3454,6 +3508,24 @@ "type":"list", "member":{"shape":"ExpressionVariable"} }, + "ForwardingConfig":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"ForwardingConfigState", + "documentation":"

The forwarding state for the given property.

" + } + }, + "documentation":"

The forwarding configuration for a given property.

" + }, + "ForwardingConfigState":{ + "type":"string", + "enum":[ + "DISABLED", + "ENABLED" + ] + }, "GatewayCapabilitySummaries":{ "type":"list", "member":{"shape":"GatewayCapabilitySummary"} @@ -3478,11 +3550,14 @@ }, "GatewayPlatform":{ "type":"structure", - "required":["greengrass"], "members":{ "greengrass":{ "shape":"Greengrass", "documentation":"

A gateway that runs on IoT Greengrass.

" + }, + "greengrassV2":{ + "shape":"GreengrassV2", + "documentation":"

A gateway that runs on IoT Greengrass V2.

" } }, "documentation":"

Contains a gateway's platform information.

" @@ -3508,6 +3583,7 @@ "shape":"Name", "documentation":"

The name of the asset.

" }, + "gatewayPlatform":{"shape":"GatewayPlatform"}, "gatewayCapabilitySummaries":{ "shape":"GatewayCapabilitySummaries", "documentation":"

A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use DescribeGatewayCapabilityConfiguration.

" @@ -3828,6 +3904,17 @@ }, "documentation":"

Contains details for a gateway that runs on IoT Greengrass. To create a gateway that runs on IoT Greengrass, you must add the IoT SiteWise connector to a Greengrass group and deploy it. Your Greengrass group must also have permissions to upload data to IoT SiteWise. For more information, see Ingesting data using a gateway in the IoT SiteWise User Guide.

" }, + "GreengrassV2":{ + "type":"structure", + "required":["coreDeviceThingName"], + "members":{ + "coreDeviceThingName":{ + "shape":"CoreDeviceThingName", + "documentation":"

The name of the IoT thing for your IoT Greengrass V2 core device.

" + } + }, + "documentation":"

Contains details for a gateway that runs on IoT Greengrass V2. To create a gateway that runs on IoT Greengrass V2, you must deploy the IoT SiteWise Edge component to your gateway device. Your Greengrass device role must use the AWSIoTSiteWiseEdgeAccess policy. For more information, see Using IoT SiteWise at the edge in the IoT SiteWise User Guide.

" + }, "GroupIdentity":{ "type":"structure", "required":["id"], @@ -3999,9 +4086,8 @@ }, "Interval":{ "type":"string", - "max":3, - "min":2, - "pattern":"1w|1d|1h|15m|5m|1m" + "max":23, + "min":2 }, "IntervalInSeconds":{ "type":"long", @@ -4505,9 +4591,24 @@ "Measurement":{ "type":"structure", "members":{ + "processingConfig":{ + "shape":"MeasurementProcessingConfig", + "documentation":"

The processing configuration for the given measurement property. You can configure measurements to be kept at the edge or forwarded to the Amazon Web Services Cloud. By default, measurements are forwarded to the cloud.

" + } }, "documentation":"

Contains an asset measurement property. For more information, see Measurements in the IoT SiteWise User Guide.

" }, + "MeasurementProcessingConfig":{ + "type":"structure", + "required":["forwardingConfig"], + "members":{ + "forwardingConfig":{ + "shape":"ForwardingConfig", + "documentation":"

The forwarding configuration for the given measurement property.

" + } + }, + "documentation":"

The processing configuration for the given measurement property. You can configure measurements to be kept at the edge or forwarded to the Amazon Web Services Cloud. By default, measurements are forwarded to the cloud.

" + }, "Metric":{ "type":"structure", "required":[ @@ -4527,10 +4628,25 @@ "window":{ "shape":"MetricWindow", "documentation":"

The window (time interval) over which IoT SiteWise computes the metric's aggregation expression. IoT SiteWise computes one data point per window.

" + }, + "processingConfig":{ + "shape":"MetricProcessingConfig", + "documentation":"

The processing configuration for the given metric property. You can configure metrics to be computed at the edge or in the Amazon Web Services Cloud. By default, metrics are forwarded to the cloud.

" } }, "documentation":"

Contains an asset metric property. With metrics, you can calculate aggregate functions, such as an average, maximum, or minimum, as specified through an expression. A metric maps several values to a single value (such as a sum).

The maximum number of dependent/cascading variables used in any one metric calculation is 10. Therefore, a root metric can have up to 10 cascading metrics in its computational dependency tree. Additionally, a metric can only have a data type of DOUBLE and consume properties with data types of INTEGER or DOUBLE.

For more information, see Metrics in the IoT SiteWise User Guide.

" }, + "MetricProcessingConfig":{ + "type":"structure", + "required":["computeLocation"], + "members":{ + "computeLocation":{ + "shape":"ComputeLocation", + "documentation":"

The compute location for the given metric property.

" + } + }, + "documentation":"

The processing configuration for the given metric property. You can configure metrics to be computed at the edge or in the Amazon Web Services Cloud. By default, metrics are forwarded to the cloud.

" + }, "MetricWindow":{ "type":"structure", "members":{ @@ -4587,6 +4703,11 @@ "min":1, "pattern":"[A-Za-z0-9+/=]+" }, + "Offset":{ + "type":"string", + "max":25, + "min":2 + }, "OffsetInNanos":{ "type":"integer", "max":999999999, @@ -5177,10 +5298,26 @@ "variables":{ "shape":"ExpressionVariables", "documentation":"

The list of variables used in the expression.

" + }, + "processingConfig":{ + "shape":"TransformProcessingConfig", + "documentation":"

The processing configuration for the given transform property. You can configure transforms to be kept at the edge or forwarded to the Amazon Web Services Cloud. You can also configure transforms to be computed at the edge or in the cloud.

" } }, "documentation":"

Contains an asset transform property. A transform is a one-to-one mapping of a property's data points from one form to another. For example, you can use a transform to convert a Celsius data stream to Fahrenheit by applying the transformation expression to each data point of the Celsius stream. A transform can only have a data type of DOUBLE and consume properties with data types of INTEGER or DOUBLE.

For more information, see Transforms in the IoT SiteWise User Guide.

" }, + "TransformProcessingConfig":{ + "type":"structure", + "required":["computeLocation"], + "members":{ + "computeLocation":{ + "shape":"ComputeLocation", + "documentation":"

The compute location for the given transform property.

" + }, + "forwardingConfig":{"shape":"ForwardingConfig"} + }, + "documentation":"

The processing configuration for the given transform property. You can configure transforms to be kept at the edge or forwarded to the Amazon Web Services Cloud. You can also configure transforms to be computed at the edge or in the cloud.

" + }, "TraversalDirection":{ "type":"string", "enum":[ @@ -5198,10 +5335,14 @@ "members":{ "interval":{ "shape":"Interval", - "documentation":"

The time interval for the tumbling window. Note that w represents weeks, d represents days, h represents hours, and m represents minutes. IoT SiteWise computes the 1w interval the end of Sunday at midnight each week (UTC), the 1d interval at the end of each day at midnight (UTC), the 1h interval at the end of each hour, and so on.

When IoT SiteWise aggregates data points for metric computations, the start of each interval is exclusive and the end of each interval is inclusive. IoT SiteWise places the computed data point at the end of the interval.

" + "documentation":"

The time interval for the tumbling window. The interval time must be between 1 minute and 1 week.

IoT SiteWise computes the 1w interval the end of Sunday at midnight each week (UTC), the 1d interval at the end of each day at midnight (UTC), the 1h interval at the end of each hour, and so on.

When IoT SiteWise aggregates data points for metric computations, the start of each interval is exclusive and the end of each interval is inclusive. IoT SiteWise places the computed data point at the end of the interval.

" + }, + "offset":{ + "shape":"Offset", + "documentation":"

The offset for the tumbling window. The offset parameter accepts the following:

  • The offset time.

    For example, if you specify 18h for offset and 1d for interval, IoT SiteWise aggregates data in one of the following ways:

    • If you create the metric before or at 6:00 p.m. (UTC), you get the first aggregation result at 6 p.m. (UTC) on the day when you create the metric.

    • If you create the metric after 6:00 p.m. (UTC), you get the first aggregation result at 6 p.m. (UTC) the next day.

  • The ISO 8601 format.

    For example, if you specify PT18H for offset and 1d for interval, IoT SiteWise aggregates data in one of the following ways:

    • If you create the metric before or at 6:00 p.m. (UTC), you get the first aggregation result at 6 p.m. (UTC) on the day when you create the metric.

    • If you create the metric after 6:00 p.m. (UTC), you get the first aggregation result at 6 p.m. (UTC) the next day.

  • The 24-hour clock.

    For example, if you specify 00:03:00 for offset and 5m for interval, and you create the metric at 2 p.m. (UTC), you get the first aggregation result at 2:03 p.m. (UTC). You get the second aggregation result at 2:08 p.m. (UTC).

  • The offset time zone.

    For example, if you specify 2021-07-23T18:00-08 for offset and 1d for interval, IoT SiteWise aggregates data in one of the following ways:

    • If you create the metric before or at 6:00 p.m. (PST), you get the first aggregation result at 6 p.m. (PST) on the day when you create the metric.

    • If you create the metric after 6:00 p.m. (PST), you get the first aggregation result at 6 p.m. (PST) the next day.

" } }, - "documentation":"

Contains a tumbling window, which is a repeating fixed-sized, non-overlapping, and contiguous time interval. This window is used in metric and aggregation computations.

" + "documentation":"

Contains a tumbling window, which is a repeating fixed-sized, non-overlapping, and contiguous time window. You use this window in metrics to aggregate data from properties and other assets.

You can use m, h, d, and w when you specify an interval or offset. Note that m represents minutes, and w represents weeks. You can also use s to represent seconds in offset.

The interval and offset parameters support the ISO 8601 format. For example, PT5S represents five seconds, PT5M represents five minutes, and PT5H represents five hours.

" }, "UnauthorizedException":{ "type":"structure", diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index f2cf4443f799..558a7f40ecc0 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index 90bb92d2c7f5..6131c8003373 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/iotwireless/src/main/resources/codegen-resources/service-2.json b/services/iotwireless/src/main/resources/codegen-resources/service-2.json index 19ec9ee5367a..0714c4d6ce06 100644 --- a/services/iotwireless/src/main/resources/codegen-resources/service-2.json +++ b/services/iotwireless/src/main/resources/codegen-resources/service-2.json @@ -467,7 +467,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns current default log-levels, or log levels by resource types, could be for wireless device log options or wireless gateway log options.

" + "documentation":"

Returns current default log levels or log levels by resource types. Based on resource types, log levels can be for wireless device log options or wireless gateway log options.

" }, "GetPartnerAccount":{ "name":"GetPartnerAccount", @@ -501,7 +501,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Fetches the log-level override if any for a given resource-ID and resource-type, coulde be a wireless device or a wireless gateway.

" + "documentation":"

Fetches the log-level override, if any, for a given resource-ID and resource-type. It can be used for a wireless device or a wireless gateway.

" }, "GetServiceEndpoint":{ "name":"GetServiceEndpoint", @@ -819,7 +819,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Sets the log-level override for a resource-ID and resource-type, could be a wireless gateway or a wireless device.

" + "documentation":"

Sets the log-level override for a resource-ID and resource-type. This option can be specified for a wireless gateway or a wireless device. A limit of 200 log-level overrides can be set per account.

" }, "ResetAllResourceLogLevels":{ "name":"ResetAllResourceLogLevels", @@ -837,7 +837,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Remove log-level overrides if any for all resources (both wireless devices and wireless gateways).

" + "documentation":"

Removes the log-level overrides for all resources, including both wireless devices and wireless gateways.

" }, "ResetResourceLogLevel":{ "name":"ResetResourceLogLevel", @@ -855,7 +855,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Remove log-level override if any for a specific resource-ID and resource-type, could be a wireless device or a wireless gateway.

" + "documentation":"

Removes the log-level override, if any, for a specific resource-ID and resource-type. It can be used for a wireless device or a wireless gateway.

" }, "SendDataToWirelessDevice":{ "name":"SendDataToWirelessDevice", @@ -963,7 +963,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Set default log level, or log levels by resource types, could be for wireless device log options or wireless gateways log options. This is to control the log messages that will be displayed in CloudWatch.

" + "documentation":"

Set default log level, or log levels by resource types. This can be for wireless device log options or wireless gateways log options and is used to control the log messages that'll be displayed in CloudWatch.

" }, "UpdatePartnerAccount":{ "name":"UpdatePartnerAccount", @@ -1061,6 +1061,7 @@ "AddGwMetadata":{"type":"boolean"}, "AmazonId":{ "type":"string", + "documentation":"

The Sidewalk Amazon ID.

", "max":2048 }, "AmazonResourceName":{ @@ -2082,7 +2083,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of the resource, currently support WirelessDevice and WirelessGateway.

", + "documentation":"

The type of the resource, which can be WirelessDevice or WirelessGateway.

", "location":"querystring", "locationName":"resourceType" } @@ -3166,7 +3167,7 @@ "MessageId":{"type":"string"}, "MessageType":{ "type":"string", - "documentation":"

Sidewalk device message type.

", + "documentation":"

Sidewalk device message type. Default value is CUSTOM_COMMAND_ID_NOTIFY.

", "enum":[ "CUSTOM_COMMAND_ID_NOTIFY", "CUSTOM_COMMAND_ID_GET", @@ -3300,7 +3301,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of the resource, currently support WirelessDevice and WirelessGateway.

", + "documentation":"

The type of the resource, which can be WirelessDevice or WirelessGateway.

", "location":"querystring", "locationName":"resourceType" }, @@ -3343,7 +3344,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of the resource, currently support WirelessDevice and WirelessGateway.

", + "documentation":"

The type of the resource, which can be WirelessDevice or WirelessGateway.

", "location":"querystring", "locationName":"resourceType" } @@ -3357,7 +3358,7 @@ "ResourceId":{"type":"string"}, "ResourceIdentifier":{ "type":"string", - "documentation":"

The identifier of the resource. For a Wireless Device, it is the wireless device id. For a wireless gateway, it is the wireless gateway id.

", + "documentation":"

The identifier of the resource. For a Wireless Device, it is the wireless device ID. For a wireless gateway, it is the wireless gateway ID.

", "max":256 }, "ResourceNotFoundException":{ @@ -3435,7 +3436,7 @@ }, "PayloadData":{ "shape":"PayloadData", - "documentation":"

The message payload to send.

" + "documentation":"

The binary to be sent to the end device, encoded in base64.

" }, "WirelessMetadata":{ "shape":"WirelessMetadata", @@ -3563,6 +3564,7 @@ "SidewalkDevice":{ "type":"structure", "members":{ + "AmazonId":{"shape":"AmazonId"}, "SidewalkId":{ "shape":"SidewalkId", "documentation":"

The sidewalk device identification.

" @@ -4055,7 +4057,7 @@ "Event":{"shape":"WirelessDeviceEvent"}, "LogLevel":{"shape":"LogLevel"} }, - "documentation":"

The log option for a wireless device event. Can be used to set log level for a specific wireless device event. For a LoRaWAN device, the possible events for a log messsage are: Join, Rejoin, Downlink_Data, Uplink_Data. For a Sidewalk device, the possible events for a log message are: Registration, Downlink_Data, Uplink_Data.

" + "documentation":"

The log options for a wireless device event, which can be used to set log levels for a specific wireless device event.

For a LoRaWAN device, possible events for a log message are: Join, Rejoin, Downlink_Data, and Uplink_Data. For a Sidewalk device, possible events for a log message are Registration, Downlink_Data, and Uplink_Data.

" }, "WirelessDeviceEventLogOptionList":{ "type":"list", @@ -4071,7 +4073,8 @@ "enum":[ "WirelessDeviceId", "DevEui", - "ThingName" + "ThingName", + "SidewalkManufacturingSn" ] }, "WirelessDeviceLogOption":{ @@ -4088,7 +4091,7 @@ "LogLevel":{"shape":"LogLevel"}, "Events":{"shape":"WirelessDeviceEventLogOptionList"} }, - "documentation":"

The log option for wireless devices. Can be used to set log level for a specific type of wireless device.

" + "documentation":"

The log options for wireless devices, which can be used to set log levels for a specific type of wireless device.

" }, "WirelessDeviceLogOptionList":{ "type":"list", @@ -4167,7 +4170,7 @@ "Event":{"shape":"WirelessGatewayEvent"}, "LogLevel":{"shape":"LogLevel"} }, - "documentation":"

The log option for a wireless gateway event. Can be used to set log level for a specific wireless gateway event. For a LoRaWAN gateway, the possible events for a log message are: CUPS_Request, Certificate.

" + "documentation":"

The log options for a wireless gateway event, which can be used to set log levels for a specific wireless gateway event.

For a LoRaWAN gateway, possible events for a log message are CUPS_Request and Certificate.

" }, "WirelessGatewayEventLogOptionList":{ "type":"list", @@ -4197,7 +4200,7 @@ "LogLevel":{"shape":"LogLevel"}, "Events":{"shape":"WirelessGatewayEventLogOptionList"} }, - "documentation":"

The log option for wireless gateways. Can be used to set log level for a specific type of wireless gateway.

" + "documentation":"

The log options for wireless gateways, which can be used to set log levels for a specific type of wireless gateway.

" }, "WirelessGatewayLogOptionList":{ "type":"list", diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index 927a300f39d4..d3aac41e954f 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 8cfa6c8e63a5..fdbd88d0be67 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index b1477529e168..fafaf7c65994 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendra/src/main/resources/codegen-resources/service-2.json b/services/kendra/src/main/resources/codegen-resources/service-2.json index 5429a3a9b3ec..0e390427ec90 100644 --- a/services/kendra/src/main/resources/codegen-resources/service-2.json +++ b/services/kendra/src/main/resources/codegen-resources/service-2.json @@ -646,7 +646,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Enables you to provide feedback to Amazon Kendra to improve the performance of the service.

" + "documentation":"

Enables you to provide feedback to Amazon Kendra to improve the performance of your index.

" }, "TagResource":{ "name":"TagResource", @@ -889,7 +889,7 @@ "documentation":"

Performs a less than or equals operation on two document attributes. Use with a document attribute of type Integer or Long.

" } }, - "documentation":"

Provides filtering the query results based on document attributes.

When you use the AndAllFilters or OrAllFilters, filters you can use 2 layers under the first attribute filter. For example, you can use:

<AndAllFilters>

  1. <OrAllFilters>

  2. <EqualTo>

If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"

" + "documentation":"

Provides filtering the query results based on document attributes.

When you use the AndAllFilters or OrAllFilters, you can use a maximum of 2 layers of filters under the first attribute filter. For example, you can use:

<AndAllFilters>

  1. <OrAllFilters>

  2. <EqualTo>

If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"

If you use more than 10 attribute filters, you receive a ValidationException exception with the message \"AttributeFilter cannot have a length of more than 10\".

" }, "AttributeFilterList":{ "type":"list", @@ -1098,11 +1098,11 @@ "members":{ "StorageCapacityUnits":{ "shape":"StorageCapacityUnit", - "documentation":"

The amount of extra storage capacity for an index. A single capacity unit for an index provides 150 GB of storage space or 500,000 documents, whichever is reached first.

" + "documentation":"

The amount of extra storage capacity for an index. A single capacity unit provides 30 GB of storage space or 100,000 documents, whichever is reached first.

" }, "QueryCapacityUnits":{ "shape":"QueryCapacityUnit", - "documentation":"

The amount of extra query capacity for an index and GetQuerySuggestions capacity.

A single extra capacity unit for an index provides 0.5 queries per second or approximately 40,000 queries per day.

GetQuerySuggestions capacity is 5 times the provisioned query capacity for an index. For example, the base capacity for an index is 0.5 queries per second, so GetQuerySuggestions capacity is 2.5 calls per second. If adding another 0.5 queries per second to total 1 queries per second for an index, the GetQuerySuggestions capacity is 5 calls per second.

" + "documentation":"

The amount of extra query capacity for an index and GetQuerySuggestions capacity.

A single extra capacity unit for an index provides 0.1 queries per second or approximately 8,000 queries per day.

GetQuerySuggestions capacity is five times the provisioned query capacity for an index, or the base capacity of 2.5 calls per second, whichever is higher. For example, the base capacity for an index is 0.1 queries per second, and GetQuerySuggestions capacity has a base of 2.5 calls per second. If you add another 0.1 queries per second to total 0.2 queries per second for an index, the GetQuerySuggestions capacity is 2.5 calls per second (higher than five times 0.2 queries per second).

" } }, "documentation":"

Specifies capacity units configured for your enterprise edition index. You can add and remove capacity units to tune an index to your requirements.

" @@ -1510,7 +1510,7 @@ }, "SecretArn":{ "shape":"SecretArn", - "documentation":"

The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about AWS Secrets Manager, see What Is AWS Secrets Manager in the Secrets Manager user guide.

" + "documentation":"

The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.

" } }, "documentation":"

Provides the information necessary to connect to a database.

" @@ -1653,7 +1653,7 @@ }, "Edition":{ "shape":"IndexEdition", - "documentation":"

The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production databases. Once you set the edition for an index, it can't be changed.

The Edition parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION.

" + "documentation":"

The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production databases. Once you set the edition for an index, it can't be changed.

The Edition parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION.

For more information on quota limits for enterprise and developer editions, see Quotas.

" }, "RoleArn":{ "shape":"RoleArn", @@ -1828,7 +1828,11 @@ "shape":"GoogleDriveConfiguration", "documentation":"

Provides configuration for data sources that connect to Google Drive.

" }, - "WebCrawlerConfiguration":{"shape":"WebCrawlerConfiguration"} + "WebCrawlerConfiguration":{"shape":"WebCrawlerConfiguration"}, + "WorkDocsConfiguration":{ + "shape":"WorkDocsConfiguration", + "documentation":"

Provides the configuration information to connect to WorkDocs as your data source.

" + } }, "documentation":"

Configuration information for a Amazon Kendra data source.

" }, @@ -2076,7 +2080,8 @@ "CUSTOM", "CONFLUENCE", "GOOGLEDRIVE", - "WEBCRAWLER" + "WEBCRAWLER", + "WORKDOCS" ] }, "DataSourceVpcConfiguration":{ @@ -3973,6 +3978,12 @@ "DESCENDING" ] }, + "OrganizationId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"d-[0-9a-fA-F]{10}" + }, "Port":{ "type":"integer", "max":65535, @@ -4968,7 +4979,7 @@ }, "SecretArn":{ "shape":"SecretArn", - "documentation":"

The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. The credentials should be a user/password pair. If you use SharePoint Sever, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source. For more information about AWS Secrets Manager, see What Is AWS Secrets Manager in the Secrets Manager user guide.

" + "documentation":"

The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. The credentials should be a user/password pair. If you use SharePoint Server, you also need to provide the server domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.

" }, "CrawlAttachments":{ "shape":"Boolean", @@ -5798,6 +5809,37 @@ "SUBDOMAINS", "EVERYTHING" ] + }, + "WorkDocsConfiguration":{ + "type":"structure", + "required":["OrganizationId"], + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The identifier of the directory corresponding to your Amazon WorkDocs site repository.

You can find the organization ID in the AWS Directory Service by going to Active Directory, then Directories. Your Amazon WorkDocs site directory has an ID, which is the organization ID. You can also set up a new Amazon WorkDocs directory in the AWS Directory Service console and enable an Amazon WorkDocs site for the directory in the Amazon WorkDocs console.

" + }, + "CrawlComments":{ + "shape":"Boolean", + "documentation":"

TRUE to include comments on documents in your index. Including comments in your index means each comment is a document that can be searched on.

The default is set to FALSE.

" + }, + "UseChangeLog":{ + "shape":"Boolean", + "documentation":"

TRUE to use the change logs to update documents in your index instead of scanning all documents.

If you are syncing your Amazon WorkDocs data source with your index for the first time, all documents are scanned. After your first sync, you can use the change logs to update your documents in your index for future syncs.

The default is set to FALSE.

" + }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

A list of regular expression patterns to include certain files in your Amazon WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. If a file matches both an inclusion pattern and an exclusion pattern, the exclusion pattern takes precedence and the file isn’t included in the index.

" + }, + "ExclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

A list of regular expression patterns to exclude certain files in your Amazon WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don’t match the patterns are included in the index. If a file matches both an inclusion pattern and an exclusion pattern, the exclusion pattern takes precedence and the file isn’t included in the index.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

A list of DataSourceToIndexFieldMapping objects that map Amazon WorkDocs field names to custom index field names in Amazon Kendra. You must first create the custom index fields using the UpdateIndex operation before you map to Amazon WorkDocs fields. For more information, see Mapping Data Source Fields. The Amazon WorkDocs data source field names need to exist in your Amazon WorkDocs custom metadata.

" + } + }, + "documentation":"

Provides the configuration information to connect to Amazon WorkDocs as your data source.

Amazon WorkDocs connector is available in Oregon, North Virginia, Sydney, Singapore and Ireland regions.

" } }, "documentation":"

Amazon Kendra is a service for indexing large document sets.

" diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index ff0201259a52..b634655551b9 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesis/src/main/java/software/amazon/awssdk/services/kinesis/KinesisRetryPolicy.java b/services/kinesis/src/main/java/software/amazon/awssdk/services/kinesis/KinesisRetryPolicy.java index 0bbdbc5dcc89..621993a91f4f 100644 --- a/services/kinesis/src/main/java/software/amazon/awssdk/services/kinesis/KinesisRetryPolicy.java +++ b/services/kinesis/src/main/java/software/amazon/awssdk/services/kinesis/KinesisRetryPolicy.java @@ -42,6 +42,7 @@ public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { RetryMode retryMode = RetryMode.resolver() .profileFile(() -> config.option(SdkClientOption.PROFILE_FILE)) .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) .resolve(); return AwsRetryPolicy.forRetryMode(retryMode) .toBuilder() diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index f838b1324cdc..5138e7ad9ce3 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index 3108f00434c6..44fa7429c614 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 4aa9dccc12a8..8986f9a70d9f 
100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index 6d5525b698f6..e97b58c1fcd9 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index f0a5352d857b..02209d97ca23 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 372b9abde078..8fb123ded769 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 73a88a5b5457..e05e0d6dd486 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index 2ecd7ddda3c3..b7b3bc67c427 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lambda/pom.xml 
b/services/lambda/pom.xml index c298ec06e779..45ccbeddf9fe 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index 8300445964c4..32ba7897435e 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -238,7 +238,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ], "documentation":"

Deletes the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" }, @@ -835,7 +836,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ], "documentation":"

Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.

By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.

To send an invocation record to a queue, topic, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.

" }, @@ -1037,7 +1039,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ], "documentation":"

Updates the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" } @@ -1537,7 +1540,7 @@ }, "SourceAccessConfigurations":{ "shape":"SourceAccessConfigurations", - "documentation":"

An array of the authentication protocol, or the VPC components to secure your event source.

" + "documentation":"

An array of authentication protocols or VPC components required to secure your event source.

" }, "SelfManagedEventSource":{ "shape":"SelfManagedEventSource", @@ -1994,7 +1997,7 @@ }, "StartingPosition":{ "shape":"EventSourcePosition", - "documentation":"

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.

" + "documentation":"

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams.

" }, "StartingPositionTimestamp":{ "shape":"Date", @@ -2006,11 +2009,11 @@ }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds. The default value is zero.

" + "documentation":"

(Streams and Amazon SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds. The default value is zero.

" }, "ParallelizationFactor":{ "shape":"ParallelizationFactor", - "documentation":"

(Streams only) The number of batches to process from each shard concurrently. The default value is 1.

" + "documentation":"

(Streams only) The number of batches to process concurrently from each shard. The default value is 1.

" }, "EventSourceArn":{ "shape":"Arn", @@ -2022,11 +2025,11 @@ }, "LastModified":{ "shape":"Date", - "documentation":"

The date that the event source mapping was last updated, or its state changed.

" + "documentation":"

The date that the event source mapping was last updated or that its state changed.

" }, "LastProcessingResult":{ "shape":"String", - "documentation":"

The result of the last Lambda invocation of your Lambda function.

" + "documentation":"

The result of the last Lambda invocation of your function.

" }, "State":{ "shape":"String", @@ -2034,7 +2037,7 @@ }, "StateTransitionReason":{ "shape":"String", - "documentation":"

Indicates whether the last change to the event source mapping was made by a user, or by the Lambda service.

" + "documentation":"

Indicates whether a user or Lambda made the last change to the event source mapping.

" }, "DestinationConfig":{ "shape":"DestinationConfig", @@ -2046,15 +2049,15 @@ }, "Queues":{ "shape":"Queues", - "documentation":"

(MQ) The name of the Amazon MQ broker destination queue to consume.

" + "documentation":"

(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.

" }, "SourceAccessConfigurations":{ "shape":"SourceAccessConfigurations", - "documentation":"

An array of the authentication protocol, or the VPC components to secure your event source.

" + "documentation":"

An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.

" }, "SelfManagedEventSource":{ "shape":"SelfManagedEventSource", - "documentation":"

The Self-Managed Apache Kafka cluster for your event source.

" + "documentation":"

The self-managed Apache Kafka cluster for your event source.

" }, "MaximumRecordAgeInSeconds":{ "shape":"MaximumRecordAgeInSeconds", @@ -2070,14 +2073,14 @@ }, "TumblingWindowInSeconds":{ "shape":"TumblingWindowInSeconds", - "documentation":"

(Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.

" + "documentation":"

(Streams only) The duration in seconds of a processing window. The range is 1–900 seconds.

" }, "FunctionResponseTypes":{ "shape":"FunctionResponseTypeList", "documentation":"

(Streams only) A list of current response type enums applied to the event source mapping.

" } }, - "documentation":"

A mapping between an Amazon Web Services resource and an Lambda function. See CreateEventSourceMapping for details.

" + "documentation":"

A mapping between an Amazon Web Services resource and a Lambda function. For details, see CreateEventSourceMapping.

" }, "EventSourceMappingsList":{ "type":"list", @@ -3327,7 +3330,7 @@ }, "MaxItems":{ "shape":"MaxListItems", - "documentation":"

The maximum number of event source mappings to return.

", + "documentation":"

The maximum number of event source mappings to return. Note that ListEventSourceMappings returns a maximum of 100 items in each response, even if you set the number higher.

", "location":"querystring", "locationName":"MaxItems" } @@ -4301,7 +4304,7 @@ "documentation":"

The list of bootstrap servers for your Kafka brokers in the following format: \"KAFKA_BOOTSTRAP_SERVERS\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"].

" } }, - "documentation":"

The Self-Managed Apache Kafka cluster for your event source.

" + "documentation":"

The self-managed Apache Kafka cluster for your event source.

" }, "SensitiveString":{ "type":"string", @@ -4328,14 +4331,14 @@ "members":{ "Type":{ "shape":"SourceAccessType", - "documentation":"

The type of authentication protocol or the VPC components for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

  • BASIC_AUTH - (MQ) The Secrets Manager secret that stores your broker credentials.

  • VPC_SUBNET - The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your Self-Managed Apache Kafka cluster.

  • VPC_SECURITY_GROUP - The VPC security group used to manage access to your Self-Managed Apache Kafka brokers.

  • SASL_SCRAM_256_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your Self-Managed Apache Kafka brokers.

  • SASL_SCRAM_512_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your Self-Managed Apache Kafka brokers.

  • VIRTUAL_HOST - The name of the virtual host in your RabbitMQ broker. Lambda will use this host as the event source.

" + "documentation":"

The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

  • BASIC_AUTH - (Amazon MQ) The Secrets Manager secret that stores your broker credentials.

  • BASIC_AUTH - (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers.

  • VPC_SUBNET - The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster.

  • VPC_SECURITY_GROUP - The VPC security group used to manage access to your self-managed Apache Kafka brokers.

  • SASL_SCRAM_256_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers.

  • SASL_SCRAM_512_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers.

  • VIRTUAL_HOST - (Amazon MQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source.

" }, "URI":{ "shape":"URI", "documentation":"

The value for your chosen configuration in Type. For example: \"URI\": \"arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName\".

" } }, - "documentation":"

You can specify the authentication protocol, or the VPC components to secure access to your event source.

" + "documentation":"

To secure and define access to your event source, you can specify the authentication protocol, VPC components, or virtual host.

" }, "SourceAccessConfigurations":{ "type":"list", @@ -4681,7 +4684,7 @@ }, "SourceAccessConfigurations":{ "shape":"SourceAccessConfigurations", - "documentation":"

An array of the authentication protocol, or the VPC components to secure your event source.

" + "documentation":"

An array of authentication protocols or VPC components required to secure your event source.

" }, "TumblingWindowInSeconds":{ "shape":"TumblingWindowInSeconds", diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index b39e3fd371bd..916d84b492b2 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelbuilding/src/main/resources/codegen-resources/paginators-1.json b/services/lexmodelbuilding/src/main/resources/codegen-resources/paginators-1.json index 57cb7bf24c9e..a101307850da 100644 --- a/services/lexmodelbuilding/src/main/resources/codegen-resources/paginators-1.json +++ b/services/lexmodelbuilding/src/main/resources/codegen-resources/paginators-1.json @@ -40,6 +40,11 @@ "output_token": "nextToken", "limit_key": "maxResults" }, + "GetMigrations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, "GetSlotTypeVersions": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json b/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json index 260f5d8e32a1..b487d955eb9b 100644 --- a/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json @@ -479,6 +479,39 @@ ], "documentation":"

Returns intent information as follows:

  • If you specify the nameContains field, returns the $LATEST version of all intents that contain the specified string.

  • If you don't specify the nameContains field, returns information about the $LATEST version of all intents.

The operation requires permission for the lex:GetIntents action.

" }, + "GetMigration":{ + "name":"GetMigration", + "http":{ + "method":"GET", + "requestUri":"/migrations/{migrationId}", + "responseCode":200 + }, + "input":{"shape":"GetMigrationRequest"}, + "output":{"shape":"GetMigrationResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Provides details about an ongoing or complete migration from an Amazon Lex V1 bot to an Amazon Lex V2 bot. Use this operation to view the migration alerts and warnings related to the migration.

" + }, + "GetMigrations":{ + "name":"GetMigrations", + "http":{ + "method":"GET", + "requestUri":"/migrations", + "responseCode":200 + }, + "input":{"shape":"GetMigrationsRequest"}, + "output":{"shape":"GetMigrationsResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Gets a list of migrations between Amazon Lex V1 and Amazon Lex V2.

" + }, "GetSlotType":{ "name":"GetSlotType", "http":{ @@ -544,7 +577,7 @@ {"shape":"InternalFailureException"}, {"shape":"BadRequestException"} ], - "documentation":"

Use the GetUtterancesView operation to get information about the utterances that your users have made to your bot. You can use this list to tune the utterances that your bot responds to.

For example, say that you have created a bot to order flowers. After your users have used your bot for a while, use the GetUtterancesView operation to see the requests that they have made and whether they have been successful. You might find that the utterance \"I want flowers\" is not being recognized. You could add this utterance to the OrderFlowers intent so that your bot recognizes that utterance.

After you publish a new version of a bot, you can get information about the old version and the new so that you can compare the performance across the two versions.

Utterance statistics are generated once a day. Data is available for the last 15 days. You can request information for up to 5 versions of your bot in each request. Amazon Lex returns the most frequent utterances received by the bot in the last 15 days. The response contains information about a maximum of 100 utterances for each version.

If you set childDirected field to true when you created your bot, or if you opted out of participating in improving Amazon Lex, utterances are not available.

This operation requires permissions for the lex:GetUtterancesView action.

" + "documentation":"

Use the GetUtterancesView operation to get information about the utterances that your users have made to your bot. You can use this list to tune the utterances that your bot responds to.

For example, say that you have created a bot to order flowers. After your users have used your bot for a while, use the GetUtterancesView operation to see the requests that they have made and whether they have been successful. You might find that the utterance \"I want flowers\" is not being recognized. You could add this utterance to the OrderFlowers intent so that your bot recognizes that utterance.

After you publish a new version of a bot, you can get information about the old version and the new so that you can compare the performance across the two versions.

Utterance statistics are generated once a day. Data is available for the last 15 days. You can request information for up to 5 versions of your bot in each request. Amazon Lex returns the most frequent utterances received by the bot in the last 15 days. The response contains information about a maximum of 100 utterances for each version.

If you set childDirected field to true when you created your bot, if you are using slot obfuscation with one or more slots, or if you opted out of participating in improving Amazon Lex, utterances are not available.

This operation requires permissions for the lex:GetUtterancesView action.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -651,6 +684,24 @@ ], "documentation":"

Starts a job to import a resource to Amazon Lex.

" }, + "StartMigration":{ + "name":"StartMigration", + "http":{ + "method":"POST", + "requestUri":"/migrations", + "responseCode":202 + }, + "input":{"shape":"StartMigrationRequest"}, + "output":{"shape":"StartMigrationResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Starts migrating a bot from Amazon Lex V1 to Amazon Lex V2. Migrate your bot when you want to take advantage of the new features of Amazon Lex V2.

For more information, see Migrating a bot in the Amazon Lex developer guide.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -689,6 +740,15 @@ } }, "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Your IAM user or role does not have permission to call the Amazon Lex V2 APIs required to migrate your bot.

", + "error":{"httpStatusCode":403}, + "exception":true + }, "AliasName":{ "type":"string", "max":100, @@ -2318,6 +2378,117 @@ } } }, + "GetMigrationRequest":{ + "type":"structure", + "required":["migrationId"], + "members":{ + "migrationId":{ + "shape":"MigrationId", + "documentation":"

The unique identifier of the migration to view. The migrationID is returned by the StartMigration operation.

", + "location":"uri", + "locationName":"migrationId" + } + } + }, + "GetMigrationResponse":{ + "type":"structure", + "members":{ + "migrationId":{ + "shape":"MigrationId", + "documentation":"

The unique identifier of the migration. This is the same as the identifier used when calling the GetMigration operation.

" + }, + "v1BotName":{ + "shape":"BotName", + "documentation":"

The name of the Amazon Lex V1 bot migrated to Amazon Lex V2.

" + }, + "v1BotVersion":{ + "shape":"Version", + "documentation":"

The version of the Amazon Lex V1 bot migrated to Amazon Lex V2.

" + }, + "v1BotLocale":{ + "shape":"Locale", + "documentation":"

The locale of the Amazon Lex V1 bot migrated to Amazon Lex V2.

" + }, + "v2BotId":{ + "shape":"V2BotId", + "documentation":"

The unique identifier of the Amazon Lex V2 bot that the Amazon Lex V1 bot is being migrated to.

" + }, + "v2BotRole":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role that Amazon Lex uses to run the Amazon Lex V2 bot.

" + }, + "migrationStatus":{ + "shape":"MigrationStatus", + "documentation":"

Indicates the status of the migration. When the status is COMPLETE the migration is finished and the bot is available in Amazon Lex V2. There may be alerts and warnings that need to be resolved to complete the migration.

" + }, + "migrationStrategy":{ + "shape":"MigrationStrategy", + "documentation":"

The strategy used to conduct the migration.

  • CREATE_NEW - Creates a new Amazon Lex V2 bot and migrates the Amazon Lex V1 bot to the new bot.

  • UPDATE_EXISTING - Overwrites the existing Amazon Lex V2 bot metadata and the locale being migrated. It doesn't change any other locales in the Amazon Lex V2 bot. If the locale doesn't exist, a new locale is created in the Amazon Lex V2 bot.

" + }, + "migrationTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time that the migration started.

" + }, + "alerts":{ + "shape":"MigrationAlerts", + "documentation":"

A list of alerts and warnings that indicate issues with the migration for the Amazon Lex V1 bot to Amazon Lex V2. You receive a warning when an Amazon Lex V1 feature has a different implementation in Amazon Lex V2.

For more information, see Migrating a bot in the Amazon Lex V2 developer guide.

" + } + } + }, + "GetMigrationsRequest":{ + "type":"structure", + "members":{ + "sortByAttribute":{ + "shape":"MigrationSortAttribute", + "documentation":"

The field to sort the list of migrations by. You can sort by the Amazon Lex V1 bot name or the date and time that the migration was started.

", + "location":"querystring", + "locationName":"sortByAttribute" + }, + "sortByOrder":{ + "shape":"SortOrder", + "documentation":"

The order to sort the list.

", + "location":"querystring", + "locationName":"sortByOrder" + }, + "v1BotNameContains":{ + "shape":"BotName", + "documentation":"

Filters the list to contain only bots whose name contains the specified string. The string is matched anywhere in bot name.

", + "location":"querystring", + "locationName":"v1BotNameContains" + }, + "migrationStatusEquals":{ + "shape":"MigrationStatus", + "documentation":"

Filters the list to contain only migrations in the specified state.

", + "location":"querystring", + "locationName":"migrationStatusEquals" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of migrations to return in the response. The default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that fetches the next page of migrations. If the response to this operation is truncated, Amazon Lex returns a pagination token in the response. To fetch the next page of migrations, specify the pagination token in the request.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetMigrationsResponse":{ + "type":"structure", + "members":{ + "migrationSummaries":{ + "shape":"MigrationSummaryList", + "documentation":"

An array of summaries for migrations from Amazon Lex V1 to Amazon Lex V2. To see details of the migration, use the migrationId from the summary in a call to the GetMigration operation.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, it includes a pagination token that you can specify in your next request to fetch the next page of migrations.

" + } + } + }, "GetSlotTypeRequest":{ "type":"structure", "required":[ @@ -2653,7 +2824,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws:lambda:[a-z]+-[a-z]+-[0-9]:[0-9]{12}:function:[a-zA-Z0-9-_]+(/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})?(:[a-zA-Z0-9-_]+)?" + "pattern":"arn:aws[a-zA-Z-]*:lambda:[a-z]+-[a-z]+(-[a-z]+)*-[0-9]:[0-9]{12}:function:[a-zA-Z0-9-_]+(\\/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})?(:[a-zA-Z0-9-_]+)?" }, "LimitExceededException":{ "type":"structure", @@ -2704,6 +2875,7 @@ "de-DE", "en-AU", "en-GB", + "en-IN", "en-US", "es-419", "es-ES", @@ -2832,6 +3004,124 @@ "max":5, "min":1 }, + "MigrationAlert":{ + "type":"structure", + "members":{ + "type":{ + "shape":"MigrationAlertType", + "documentation":"

The type of alert. There are two kinds of alerts:

  • ERROR - There was an issue with the migration that can't be resolved. The migration stops.

  • WARN - There was an issue with the migration that requires manual changes to the new Amazon Lex V2 bot. The migration continues.

" + }, + "message":{ + "shape":"MigrationAlertMessage", + "documentation":"

A message that describes why the alert was issued.

" + }, + "details":{ + "shape":"MigrationAlertDetails", + "documentation":"

Additional details about the alert.

" + }, + "referenceURLs":{ + "shape":"MigrationAlertReferenceURLs", + "documentation":"

A link to the Amazon Lex documentation that describes how to resolve the alert.

" + } + }, + "documentation":"

Provides information about alerts and warnings that Amazon Lex sends during a migration. The alerts include information about how to resolve the issue.

" + }, + "MigrationAlertDetail":{"type":"string"}, + "MigrationAlertDetails":{ + "type":"list", + "member":{"shape":"MigrationAlertDetail"} + }, + "MigrationAlertMessage":{"type":"string"}, + "MigrationAlertReferenceURL":{"type":"string"}, + "MigrationAlertReferenceURLs":{ + "type":"list", + "member":{"shape":"MigrationAlertReferenceURL"} + }, + "MigrationAlertType":{ + "type":"string", + "enum":[ + "ERROR", + "WARN" + ] + }, + "MigrationAlerts":{ + "type":"list", + "member":{"shape":"MigrationAlert"} + }, + "MigrationId":{ + "type":"string", + "max":10, + "min":10, + "pattern":"^[0-9a-zA-Z]+$" + }, + "MigrationSortAttribute":{ + "type":"string", + "enum":[ + "V1_BOT_NAME", + "MIGRATION_DATE_TIME" + ] + }, + "MigrationStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, + "MigrationStrategy":{ + "type":"string", + "enum":[ + "CREATE_NEW", + "UPDATE_EXISTING" + ] + }, + "MigrationSummary":{ + "type":"structure", + "members":{ + "migrationId":{ + "shape":"MigrationId", + "documentation":"

The unique identifier that Amazon Lex assigned to the migration.

" + }, + "v1BotName":{ + "shape":"BotName", + "documentation":"

The name of the Amazon Lex V1 bot that is the source of the migration.

" + }, + "v1BotVersion":{ + "shape":"Version", + "documentation":"

The version of the Amazon Lex V1 bot that is the source of the migration.

" + }, + "v1BotLocale":{ + "shape":"Locale", + "documentation":"

The locale of the Amazon Lex V1 bot that is the source of the migration.

" + }, + "v2BotId":{ + "shape":"V2BotId", + "documentation":"

The unique identifier of the Amazon Lex V2 bot that is the destination of the migration.

" + }, + "v2BotRole":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role that Amazon Lex uses to run the Amazon Lex V2 bot.

" + }, + "migrationStatus":{ + "shape":"MigrationStatus", + "documentation":"

The status of the operation. When the status is COMPLETE the bot is available in Amazon Lex V2. There may be alerts and warnings that need to be resolved to complete the migration.

" + }, + "migrationStrategy":{ + "shape":"MigrationStrategy", + "documentation":"

The strategy used to conduct the migration.

" + }, + "migrationTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time that the migration started.

" + } + }, + "documentation":"

Provides information about migrating a bot from Amazon Lex V1 to Amazon Lex V2.

" + }, + "MigrationSummaryList":{ + "type":"list", + "member":{"shape":"MigrationSummary"} + }, "Name":{ "type":"string", "max":100, @@ -3682,6 +3972,13 @@ "TOP_RESOLUTION" ] }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, "StartImportRequest":{ "type":"structure", "required":[ @@ -3741,6 +4038,75 @@ } } }, + "StartMigrationRequest":{ + "type":"structure", + "required":[ + "v1BotName", + "v1BotVersion", + "v2BotName", + "v2BotRole", + "migrationStrategy" + ], + "members":{ + "v1BotName":{ + "shape":"BotName", + "documentation":"

The name of the Amazon Lex V1 bot that you are migrating to Amazon Lex V2.

" + }, + "v1BotVersion":{ + "shape":"Version", + "documentation":"

The version of the bot to migrate to Amazon Lex V2. You can migrate the $LATEST version as well as any numbered version.

" + }, + "v2BotName":{ + "shape":"V2BotName", + "documentation":"

The name of the Amazon Lex V2 bot that you are migrating the Amazon Lex V1 bot to.

  • If the Amazon Lex V2 bot doesn't exist, you must use the CREATE_NEW migration strategy.

  • If the Amazon Lex V2 bot exists, you must use the UPDATE_EXISTING migration strategy to change the contents of the Amazon Lex V2 bot.

" + }, + "v2BotRole":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role that Amazon Lex uses to run the Amazon Lex V2 bot.

" + }, + "migrationStrategy":{ + "shape":"MigrationStrategy", + "documentation":"

The strategy used to conduct the migration.

  • CREATE_NEW - Creates a new Amazon Lex V2 bot and migrates the Amazon Lex V1 bot to the new bot.

  • UPDATE_EXISTING - Overwrites the existing Amazon Lex V2 bot metadata and the locale being migrated. It doesn't change any other locales in the Amazon Lex V2 bot. If the locale doesn't exist, a new locale is created in the Amazon Lex V2 bot.

" + } + } + }, + "StartMigrationResponse":{ + "type":"structure", + "members":{ + "v1BotName":{ + "shape":"BotName", + "documentation":"

The name of the Amazon Lex V1 bot that you are migrating to Amazon Lex V2.

" + }, + "v1BotVersion":{ + "shape":"Version", + "documentation":"

The version of the bot to migrate to Amazon Lex V2.

" + }, + "v1BotLocale":{ + "shape":"Locale", + "documentation":"

The locale used for the Amazon Lex V1 bot.

" + }, + "v2BotId":{ + "shape":"V2BotId", + "documentation":"

The unique identifier for the Amazon Lex V2 bot.

" + }, + "v2BotRole":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role that Amazon Lex uses to run the Amazon Lex V2 bot.

" + }, + "migrationId":{ + "shape":"MigrationId", + "documentation":"

The unique identifier that Amazon Lex assigned to the migration.

" + }, + "migrationStrategy":{ + "shape":"MigrationStrategy", + "documentation":"

The strategy used to conduct the migration.

" + }, + "migrationTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time that the migration started.

" + } + } + }, "Statement":{ "type":"structure", "required":["messages"], @@ -3928,6 +4294,18 @@ "max":2000, "min":1 }, + "V2BotId":{ + "type":"string", + "max":10, + "min":10, + "pattern":"^[0-9a-zA-Z]+$" + }, + "V2BotName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^([0-9a-zA-Z][_-]?)+$" + }, "Value":{ "type":"string", "max":140, diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index 4defb05a0b72..7810d23311d6 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json index dbef22d84109..10ac14ce5e42 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json @@ -1186,7 +1186,7 @@ "documentation":"

The version of the bot that was exported. This will be either DRAFT or the version number.

" } }, - "documentation":"

Provided the identity of a the bot that was exported.

" + "documentation":"

Provides the identity of the bot that was exported.

" }, "BotFilter":{ "type":"structure", @@ -4192,6 +4192,10 @@ "closingResponse":{ "shape":"ResponseSpecification", "documentation":"

The response that Amazon Lex sends to the user when the intent is complete.

" + }, + "active":{ + "shape":"BoxedBoolean", + "documentation":"

Specifies whether an intent's closing response is used. When this field is false, the closing response isn't sent to the user and no closing input from the user is used. If the active field isn't specified, the default is true.

" } }, "documentation":"

Provides a statement the Amazon Lex conveys to the user when the intent is successfully fulfilled.

" @@ -4210,6 +4214,10 @@ "declinationResponse":{ "shape":"ResponseSpecification", "documentation":"

When the user answers \"no\" to the question defined in promptSpecification, Amazon Lex responds with this response to acknowledge that the intent was canceled.

" + }, + "active":{ + "shape":"BoxedBoolean", + "documentation":"

Specifies whether the intent's confirmation is sent to the user. When this field is false, confirmation and declination responses aren't sent and processing continues as if the responses aren't present. If the active field isn't specified, the default is true.

" } }, "documentation":"

Provides a prompt for making sure that the user is ready for the intent to be fulfilled.

" @@ -5182,7 +5190,7 @@ }, "PriorityValue":{ "type":"integer", - "max":25, + "max":100, "min":0 }, "PromptMaxRetries":{ @@ -6700,6 +6708,10 @@ "stillWaitingResponse":{ "shape":"StillWaitingResponseSpecification", "documentation":"

A response that Amazon Lex sends periodically to the user to indicate that the bot is still waiting for input from the user.

" + }, + "active":{ + "shape":"BoxedBoolean", + "documentation":"

Specifies whether the bot will wait for a user to respond. When this field is false, wait and continue responses for a slot aren't used and the bot expects an appropriate response within the configured timeout. If the active field isn't specified, the default is true.

" } }, "documentation":"

Specifies the prompts that Amazon Lex uses while a bot is waiting for customer input.

" diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/waiters-2.json b/services/lexmodelsv2/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..1ec96048eca8 --- /dev/null +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,255 @@ +{ + "version":2, + "waiters":{ + "BotAvailable":{ + "delay":10, + "operation":"DescribeBot", + "maxAttempts":35, + "description":"Wait until a bot is available", + "acceptors":[ + { + "expected":"Available", + "matcher":"path", + "state":"success", + "argument":"botStatus" + }, + { + "expected":"Deleting", + "matcher":"path", + "state":"failure", + "argument":"botStatus" + }, + { + "expected":"Failed", + "matcher":"path", + "state":"failure", + "argument":"botStatus" + }, + { + "expected":"Inactive", + "matcher":"path", + "state":"failure", + "argument":"botStatus" + } + ] + }, + "BotAliasAvailable":{ + "delay":10, + "operation":"DescribeBotAlias", + "maxAttempts":35, + "description":"Wait until a bot alias is available", + "acceptors":[ + { + "expected":"Available", + "matcher":"path", + "state":"success", + "argument":"botAliasStatus" + }, + { + "expected":"Failed", + "matcher":"path", + "state":"failure", + "argument":"botAliasStatus" + }, + { + "expected":"Deleting", + "matcher":"path", + "state":"failure", + "argument":"botAliasStatus" + } + ] + }, + "BotExportCompleted":{ + "delay":10, + "operation":"DescribeExport", + "maxAttempts":35, + "description":"Wait until a bot has been exported", + "acceptors":[ + { + "expected":"Completed", + "matcher":"path", + "state":"success", + "argument":"exportStatus" + }, + { + "expected":"Deleting", + "matcher":"path", + "state":"failure", + "argument":"exportStatus" + }, + { + "expected":"Failed", + "matcher":"path", + "state":"failure", + "argument":"exportStatus" + } + ] + }, + "BotImportCompleted":{ + "delay":10, + "operation":"DescribeImport", + "maxAttempts":35, + "description":"Wait until a 
bot has been imported", + "acceptors":[ + { + "expected":"Completed", + "matcher":"path", + "state":"success", + "argument":"importStatus" + }, + { + "expected":"Deleting", + "matcher":"path", + "state":"failure", + "argument":"importStatus" + }, + { + "expected":"Failed", + "matcher":"path", + "state":"failure", + "argument":"importStatus" + } + ] + }, + "BotLocaleBuilt":{ + "delay":10, + "operation":"DescribeBotLocale", + "maxAttempts":35, + "description":"Wait until a bot locale is built", + "acceptors":[ + { + "expected":"Built", + "matcher":"path", + "state":"success", + "argument":"botLocaleStatus" + }, + { + "expected":"Deleting", + "matcher":"path", + "state":"failure", + "argument":"botLocaleStatus" + }, + { + "expected":"Failed", + "matcher":"path", + "state":"failure", + "argument":"botLocaleStatus" + }, + { + "expected":"NotBuilt", + "matcher":"path", + "state":"failure", + "argument":"botLocaleStatus" + } + ] + }, + "BotLocaleExpressTestingAvailable":{ + "delay":10, + "operation":"DescribeBotLocale", + "maxAttempts":35, + "description":"Wait until a bot locale build is ready for express testing", + "acceptors":[ + { + "expected":"Built", + "matcher":"path", + "state":"success", + "argument":"botLocaleStatus" + }, + { + "expected":"ReadyExpressTesting", + "matcher":"path", + "state":"success", + "argument":"botLocaleStatus" + }, + { + "expected":"Deleting", + "matcher":"path", + "state":"failure", + "argument":"botLocaleStatus" + }, + { + "expected":"Failed", + "matcher":"path", + "state":"failure", + "argument":"botLocaleStatus" + }, + { + "expected":"NotBuilt", + "matcher":"path", + "state":"failure", + "argument":"botLocaleStatus" + } + ] + }, + "BotVersionAvailable":{ + "delay":10, + "operation":"DescribeBotVersion", + "maxAttempts":35, + "description":"Wait until a bot version is available", + "acceptors":[ + { + "expected":"Available", + "matcher":"path", + "state":"success", + "argument":"botStatus" + }, + { + "expected":"Deleting", + 
"matcher":"path", + "state":"failure", + "argument":"botStatus" + }, + { + "expected":"Failed", + "matcher":"path", + "state":"failure", + "argument":"botStatus" + }, + { + "state":"retry", + "matcher":"status", + "expected":404 + } + ] + }, + "BotLocaleCreated":{ + "delay":10, + "operation":"DescribeBotLocale", + "maxAttempts":35, + "description":"Wait unit a bot locale is created", + "acceptors":[ + { + "expected":"Built", + "matcher":"path", + "state":"success", + "argument":"botLocaleStatus" + }, + { + "expected":"ReadyExpressTesting", + "matcher":"path", + "state":"success", + "argument":"botLocaleStatus" + }, + { + "expected":"NotBuilt", + "matcher":"path", + "state":"success", + "argument":"botLocaleStatus" + }, + { + "expected":"Deleting", + "matcher":"path", + "state":"failure", + "argument":"botLocaleStatus" + }, + { + "expected":"Failed", + "matcher":"path", + "state":"failure", + "argument":"botLocaleStatus" + } + ] + } + } +} + diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index f40c7227f337..0247202fbc11 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index b4ea018b67a1..a7e472b010cd 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index c71750ea43f4..1d791d47ca41 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/lightsail/pom.xml 
b/services/lightsail/pom.xml index 6e6399d95ee8..397de2901181 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/lightsail/src/main/resources/codegen-resources/service-2.json b/services/lightsail/src/main/resources/codegen-resources/service-2.json index 18ed0b0d0aa3..f84f4d4aa015 100644 --- a/services/lightsail/src/main/resources/codegen-resources/service-2.json +++ b/services/lightsail/src/main/resources/codegen-resources/service-2.json @@ -66,7 +66,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Attaches a block storage disk to a running or stopped Lightsail instance and exposes it to the instance with the specified disk name.

The attach disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Attaches a block storage disk to a running or stopped Lightsail instance and exposes it to the instance with the specified disk name.

The attach disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Amazon Lightsail Developer Guide.

" }, "AttachInstancesToLoadBalancer":{ "name":"AttachInstancesToLoadBalancer", @@ -85,7 +85,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Attaches one or more Lightsail instances to a load balancer.

After some time, the instances are attached to the load balancer and the health check status is available.

The attach instances to load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Attaches one or more Lightsail instances to a load balancer.

After some time, the instances are attached to the load balancer and the health check status is available.

The attach instances to load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Developer Guide.

" }, "AttachLoadBalancerTlsCertificate":{ "name":"AttachLoadBalancerTlsCertificate", @@ -104,7 +104,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Attaches a Transport Layer Security (TLS) certificate to your load balancer. TLS is just an updated, more secure version of Secure Socket Layer (SSL).

Once you create and validate your certificate, you can attach it to your load balancer. You can also use this API to rotate the certificates on your account. Use the AttachLoadBalancerTlsCertificate action with the non-attached certificate, and it will replace the existing one and become the attached certificate.

The AttachLoadBalancerTlsCertificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Attaches a Transport Layer Security (TLS) certificate to your load balancer. TLS is just an updated, more secure version of Secure Socket Layer (SSL).

Once you create and validate your certificate, you can attach it to your load balancer. You can also use this API to rotate the certificates on your account. Use the AttachLoadBalancerTlsCertificate action with the non-attached certificate, and it will replace the existing one and become the attached certificate.

The AttachLoadBalancerTlsCertificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Amazon Lightsail Developer Guide.

" }, "AttachStaticIp":{ "name":"AttachStaticIp", @@ -142,7 +142,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Closes ports for a specific Amazon Lightsail instance.

The CloseInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Closes ports for a specific Amazon Lightsail instance.

The CloseInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Amazon Lightsail Developer Guide.

" }, "CopySnapshot":{ "name":"CopySnapshot", @@ -163,6 +163,39 @@ ], "documentation":"

Copies a manual snapshot of an instance or disk as another manual snapshot, or copies an automatic snapshot of an instance or disk as a manual snapshot. This operation can also be used to copy a manual or automatic snapshot of an instance or a disk from one AWS Region to another in Amazon Lightsail.

When copying a manual snapshot, be sure to define the source region, source snapshot name, and target snapshot name parameters.

When copying an automatic snapshot, be sure to define the source region, source resource name, target snapshot name, and either the restore date or the use latest restorable auto snapshot parameters.

" }, + "CreateBucket":{ + "name":"CreateBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBucketRequest"}, + "output":{"shape":"CreateBucketResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Creates an Amazon Lightsail bucket.

A bucket is a cloud storage resource available in the Lightsail object storage service. Use buckets to store objects such as data and its descriptive metadata. For more information about buckets, see Buckets in Amazon Lightsail in the Amazon Lightsail Developer Guide.

" + }, + "CreateBucketAccessKey":{ + "name":"CreateBucketAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBucketAccessKeyRequest"}, + "output":{"shape":"CreateBucketAccessKeyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"NotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Creates a new access key for the specified Amazon Lightsail bucket. Access keys consist of an access key ID and corresponding secret access key.

Access keys grant full programmatic access to the specified bucket and its objects. You can have a maximum of two access keys per bucket. Use the GetBucketAccessKeys action to get a list of current access keys for a specific bucket. For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.

The secretAccessKey value is returned only in response to the CreateBucketAccessKey action. You can get a secret access key only when you first create an access key; you cannot get the secret access key later. If you lose the secret access key, you must create a new access key.

" + }, "CreateCertificate":{ "name":"CreateCertificate", "http":{ @@ -249,7 +282,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a deployment for your Amazon Lightsail container service.

A deployment specifies the containers that will be launched on the container service and their settings, such as the ports to open, the environment variables to apply, and the launch command to run. It also specifies the container that will serve as the public endpoint of the deployment and its settings, such as the HTTP or HTTPS port to use, and the health check configuration.

You can deploy containers to your container service using container images from a public registry like Docker Hub, or from your local machine. For more information, see Creating container images for your Amazon Lightsail container services in the Lightsail Dev Guide.

" + "documentation":"

Creates a deployment for your Amazon Lightsail container service.

A deployment specifies the containers that will be launched on the container service and their settings, such as the ports to open, the environment variables to apply, and the launch command to run. It also specifies the container that will serve as the public endpoint of the deployment and its settings, such as the HTTP or HTTPS port to use, and the health check configuration.

You can deploy containers to your container service using container images from a public registry like Docker Hub, or from your local machine. For more information, see Creating container images for your Amazon Lightsail container services in the Amazon Lightsail Developer Guide.

" }, "CreateContainerServiceRegistryLogin":{ "name":"CreateContainerServiceRegistryLogin", @@ -266,7 +299,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a temporary set of log in credentials that you can use to log in to the Docker process on your local machine. After you're logged in, you can use the native Docker commands to push your local container images to the container image registry of your Amazon Lightsail account so that you can use them with your Lightsail container service. The log in credentials expire 12 hours after they are created, at which point you will need to create a new set of log in credentials.

You can only push container images to the container service registry of your Lightsail account. You cannot pull container images or perform any other container image management actions on the container service registry.

After you push your container images to the container image registry of your Lightsail account, use the RegisterContainerImage action to register the pushed images to a specific Lightsail container service.

This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Lightsail Dev Guide.

" + "documentation":"

Creates a temporary set of log in credentials that you can use to log in to the Docker process on your local machine. After you're logged in, you can use the native Docker commands to push your local container images to the container image registry of your Amazon Lightsail account so that you can use them with your Lightsail container service. The log in credentials expire 12 hours after they are created, at which point you will need to create a new set of log in credentials.

You can only push container images to the container service registry of your Lightsail account. You cannot pull container images or perform any other container image management actions on the container service registry.

After you push your container images to the container image registry of your Lightsail account, use the RegisterContainerImage action to register the pushed images to a specific Lightsail container service.

This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Amazon Lightsail Developer Guide.

" }, "CreateDisk":{ "name":"CreateDisk", @@ -285,7 +318,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a block storage disk that can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a).

The create disk operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a block storage disk that can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a).

The create disk operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateDiskFromSnapshot":{ "name":"CreateDiskFromSnapshot", @@ -304,7 +337,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a block storage disk from a manual or automatic snapshot of a disk. The resulting disk can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a).

The create disk from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by disk snapshot name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a block storage disk from a manual or automatic snapshot of a disk. The resulting disk can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a).

The create disk from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by disk snapshot name. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateDiskSnapshot":{ "name":"CreateDiskSnapshot", @@ -323,7 +356,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a snapshot of a block storage disk. You can use snapshots for backups, to make copies of disks, and to save data before shutting down a Lightsail instance.

You can take a snapshot of an attached disk that is in use; however, snapshots only capture data that has been written to your disk at the time the snapshot command is issued. This may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the disk long enough to take a snapshot, your snapshot should be complete. Nevertheless, if you cannot pause all file writes to the disk, you should unmount the disk from within the Lightsail instance, issue the create disk snapshot command, and then remount the disk to ensure a consistent and complete snapshot. You may remount and use your disk while the snapshot status is pending.

You can also use this operation to create a snapshot of an instance's system volume. You might want to do this, for example, to recover data from the system volume of a botched instance or to create a backup of the system volume like you would for a block storage disk. To create a snapshot of a system volume, just define the instance name parameter when issuing the snapshot command, and a snapshot of the defined instance's system volume will be created. After the snapshot is available, you can create a block storage disk from the snapshot and attach it to a running instance to access the data on the disk.

The create disk snapshot operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a snapshot of a block storage disk. You can use snapshots for backups, to make copies of disks, and to save data before shutting down a Lightsail instance.

You can take a snapshot of an attached disk that is in use; however, snapshots only capture data that has been written to your disk at the time the snapshot command is issued. This may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the disk long enough to take a snapshot, your snapshot should be complete. Nevertheless, if you cannot pause all file writes to the disk, you should unmount the disk from within the Lightsail instance, issue the create disk snapshot command, and then remount the disk to ensure a consistent and complete snapshot. You may remount and use your disk while the snapshot status is pending.

You can also use this operation to create a snapshot of an instance's system volume. You might want to do this, for example, to recover data from the system volume of a botched instance or to create a backup of the system volume like you would for a block storage disk. To create a snapshot of a system volume, just define the instance name parameter when issuing the snapshot command, and a snapshot of the defined instance's system volume will be created. After the snapshot is available, you can create a block storage disk from the snapshot and attach it to a running instance to access the data on the disk.

The create disk snapshot operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateDistribution":{ "name":"CreateDistribution", @@ -360,7 +393,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a domain resource for the specified domain (e.g., example.com).

The create domain operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a domain resource for the specified domain (e.g., example.com).

The create domain operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateDomainEntry":{ "name":"CreateDomainEntry", @@ -379,7 +412,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates one of the following domain name system (DNS) records in a domain DNS zone: Address (A), canonical name (CNAME), mail exchanger (MX), name server (NS), start of authority (SOA), service locator (SRV), or text (TXT).

The create domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates one of the following domain name system (DNS) records in a domain DNS zone: Address (A), canonical name (CNAME), mail exchanger (MX), name server (NS), start of authority (SOA), service locator (SRV), or text (TXT).

The create domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateInstanceSnapshot":{ "name":"CreateInstanceSnapshot", @@ -398,7 +431,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a snapshot of a specific virtual private server, or instance. You can use a snapshot to create a new instance that is based on that snapshot.

The create instance snapshot operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a snapshot of a specific virtual private server, or instance. You can use a snapshot to create a new instance that is based on that snapshot.

The create instance snapshot operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateInstances":{ "name":"CreateInstances", @@ -417,7 +450,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates one or more Amazon Lightsail instances.

The create instances operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates one or more Amazon Lightsail instances.

The create instances operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateInstancesFromSnapshot":{ "name":"CreateInstancesFromSnapshot", @@ -436,7 +469,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates one or more new instances from a manual or automatic snapshot of an instance.

The create instances from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by instance snapshot name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates one or more new instances from a manual or automatic snapshot of an instance.

The create instances from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by instance snapshot name. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateKeyPair":{ "name":"CreateKeyPair", @@ -455,7 +488,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates an SSH key pair.

The create key pair operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates an SSH key pair.

The create key pair operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateLoadBalancer":{ "name":"CreateLoadBalancer", @@ -474,7 +507,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a Lightsail load balancer. To learn more about deciding whether to load balance your application, see Configure your Lightsail instances for load balancing. You can create up to 5 load balancers per AWS Region in your account.

When you create a load balancer, you can specify a unique name and port settings. To change additional load balancer settings, use the UpdateLoadBalancerAttribute operation.

The create load balancer operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a Lightsail load balancer. To learn more about deciding whether to load balance your application, see Configure your Lightsail instances for load balancing. You can create up to 5 load balancers per AWS Region in your account.

When you create a load balancer, you can specify a unique name and port settings. To change additional load balancer settings, use the UpdateLoadBalancerAttribute operation.

The create load balancer operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateLoadBalancerTlsCertificate":{ "name":"CreateLoadBalancerTlsCertificate", @@ -493,7 +526,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates an SSL/TLS certificate for an Amazon Lightsail load balancer.

TLS is just an updated, more secure version of Secure Socket Layer (SSL).

The CreateLoadBalancerTlsCertificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates an SSL/TLS certificate for an Amazon Lightsail load balancer.

TLS is just an updated, more secure version of Secure Socket Layer (SSL).

The CreateLoadBalancerTlsCertificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateRelationalDatabase":{ "name":"CreateRelationalDatabase", @@ -512,7 +545,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a new database in Amazon Lightsail.

The create relational database operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a new database in Amazon Lightsail.

The create relational database operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateRelationalDatabaseFromSnapshot":{ "name":"CreateRelationalDatabaseFromSnapshot", @@ -531,7 +564,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a new database from an existing database snapshot in Amazon Lightsail.

You can create a new database from a snapshot in if something goes wrong with your original database, or to change it to a different plan, such as a high availability or standard plan.

The create relational database from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by relationalDatabaseSnapshotName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a new database from an existing database snapshot in Amazon Lightsail.

You can create a new database from a snapshot if something goes wrong with your original database, or to change it to a different plan, such as a high availability or standard plan.

The create relational database from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by relationalDatabaseSnapshotName. For more information, see the Amazon Lightsail Developer Guide.

" }, "CreateRelationalDatabaseSnapshot":{ "name":"CreateRelationalDatabaseSnapshot", @@ -550,7 +583,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a snapshot of your database in Amazon Lightsail. You can use snapshots for backups, to make copies of a database, and to save data before deleting a database.

The create relational database snapshot operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a snapshot of your database in Amazon Lightsail. You can use snapshots for backups, to make copies of a database, and to save data before deleting a database.

The create relational database snapshot operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteAlarm":{ "name":"DeleteAlarm", @@ -586,7 +619,41 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes an automatic snapshot of an instance or disk. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes an automatic snapshot of an instance or disk. For more information, see the Amazon Lightsail Developer Guide.

" + }, + "DeleteBucket":{ + "name":"DeleteBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBucketRequest"}, + "output":{"shape":"DeleteBucketResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Deletes an Amazon Lightsail bucket.

When you delete your bucket, the bucket name is released and can be reused for a new bucket in your account or another AWS account.

" + }, + "DeleteBucketAccessKey":{ + "name":"DeleteBucketAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBucketAccessKeyRequest"}, + "output":{"shape":"DeleteBucketAccessKeyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Deletes an access key for the specified Amazon Lightsail bucket.

We recommend that you delete an access key if the secret access key is compromised.

For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.

" }, "DeleteCertificate":{ "name":"DeleteCertificate", @@ -674,7 +741,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the specified block storage disk. The disk must be in the available state (not attached to a Lightsail instance).

The disk may remain in the deleting state for several minutes.

The delete disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes the specified block storage disk. The disk must be in the available state (not attached to a Lightsail instance).

The disk may remain in the deleting state for several minutes.

The delete disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteDiskSnapshot":{ "name":"DeleteDiskSnapshot", @@ -693,7 +760,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the specified disk snapshot.

When you make periodic snapshots of a disk, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the disk.

The delete disk snapshot operation supports tag-based access control via resource tags applied to the resource identified by disk snapshot name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes the specified disk snapshot.

When you make periodic snapshots of a disk, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the disk.

The delete disk snapshot operation supports tag-based access control via resource tags applied to the resource identified by disk snapshot name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteDistribution":{ "name":"DeleteDistribution", @@ -730,7 +797,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the specified domain recordset and all of its domain records.

The delete domain operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes the specified domain recordset and all of its domain records.

The delete domain operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteDomainEntry":{ "name":"DeleteDomainEntry", @@ -749,7 +816,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a specific domain entry.

The delete domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a specific domain entry.

The delete domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteInstance":{ "name":"DeleteInstance", @@ -768,7 +835,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes an Amazon Lightsail instance.

The delete instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes an Amazon Lightsail instance.

The delete instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteInstanceSnapshot":{ "name":"DeleteInstanceSnapshot", @@ -787,7 +854,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a specific snapshot of a virtual private server (or instance).

The delete instance snapshot operation supports tag-based access control via resource tags applied to the resource identified by instance snapshot name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a specific snapshot of a virtual private server (or instance).

The delete instance snapshot operation supports tag-based access control via resource tags applied to the resource identified by instance snapshot name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteKeyPair":{ "name":"DeleteKeyPair", @@ -806,7 +873,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a specific SSH key pair.

The delete key pair operation supports tag-based access control via resource tags applied to the resource identified by key pair name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a specific SSH key pair.

The delete key pair operation supports tag-based access control via resource tags applied to the resource identified by key pair name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteKnownHostKeys":{ "name":"DeleteKnownHostKeys", @@ -825,7 +892,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the known host key or certificate used by the Amazon Lightsail browser-based SSH or RDP clients to authenticate an instance. This operation enables the Lightsail browser-based SSH or RDP clients to connect to the instance after a host key mismatch.

Perform this operation only if you were expecting the host key or certificate mismatch or if you are familiar with the new host key or certificate on the instance. For more information, see Troubleshooting connection issues when using the Amazon Lightsail browser-based SSH or RDP client.

" + "documentation":"

Deletes the known host key or certificate used by the Amazon Lightsail browser-based SSH or RDP clients to authenticate an instance. This operation enables the Lightsail browser-based SSH or RDP clients to connect to the instance after a host key mismatch.

Perform this operation only if you were expecting the host key or certificate mismatch or if you are familiar with the new host key or certificate on the instance. For more information, see Troubleshooting connection issues when using the Amazon Lightsail browser-based SSH or RDP client.

" }, "DeleteLoadBalancer":{ "name":"DeleteLoadBalancer", @@ -844,7 +911,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a Lightsail load balancer and all its associated SSL/TLS certificates. Once the load balancer is deleted, you will need to create a new load balancer, create a new certificate, and verify domain ownership again.

The delete load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a Lightsail load balancer and all its associated SSL/TLS certificates. Once the load balancer is deleted, you will need to create a new load balancer, create a new certificate, and verify domain ownership again.

The delete load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteLoadBalancerTlsCertificate":{ "name":"DeleteLoadBalancerTlsCertificate", @@ -863,7 +930,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes an SSL/TLS certificate associated with a Lightsail load balancer.

The DeleteLoadBalancerTlsCertificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes an SSL/TLS certificate associated with a Lightsail load balancer.

The DeleteLoadBalancerTlsCertificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteRelationalDatabase":{ "name":"DeleteRelationalDatabase", @@ -882,7 +949,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a database in Amazon Lightsail.

The delete relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a database in Amazon Lightsail.

The delete relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.

" }, "DeleteRelationalDatabaseSnapshot":{ "name":"DeleteRelationalDatabaseSnapshot", @@ -901,7 +968,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a database snapshot in Amazon Lightsail.

The delete relational database snapshot operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a database snapshot in Amazon Lightsail.

The delete relational database snapshot operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.

" }, "DetachCertificateFromDistribution":{ "name":"DetachCertificateFromDistribution", @@ -938,7 +1005,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Detaches a stopped block storage disk from a Lightsail instance. Make sure to unmount any file systems on the device within your operating system before stopping the instance and detaching the disk.

The detach disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Detaches a stopped block storage disk from a Lightsail instance. Make sure to unmount any file systems on the device within your operating system before stopping the instance and detaching the disk.

The detach disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DetachInstancesFromLoadBalancer":{ "name":"DetachInstancesFromLoadBalancer", @@ -957,7 +1024,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Detaches the specified instances from a Lightsail load balancer.

This operation waits until the instances are no longer needed before they are detached from the load balancer.

The detach instances from load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Detaches the specified instances from a Lightsail load balancer.

This operation waits until the instances are no longer needed before they are detached from the load balancer.

The detach instances from load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Amazon Lightsail Developer Guide.

" }, "DetachStaticIp":{ "name":"DetachStaticIp", @@ -994,7 +1061,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Disables an add-on for an Amazon Lightsail resource. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Disables an add-on for an Amazon Lightsail resource. For more information, see the Amazon Lightsail Developer Guide.

" }, "DownloadDefaultKeyPair":{ "name":"DownloadDefaultKeyPair", @@ -1031,7 +1098,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Enables or modifies an add-on for an Amazon Lightsail resource. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Enables or modifies an add-on for an Amazon Lightsail resource. For more information, see the Amazon Lightsail Developer Guide.

" }, "ExportSnapshot":{ "name":"ExportSnapshot", @@ -1050,7 +1117,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack operation to create new Amazon EC2 instances.

Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.

The export snapshot operation supports tag-based access control via resource tags applied to the resource identified by source snapshot name. For more information, see the Lightsail Dev Guide.

Use the get instance snapshots or get disk snapshots operations to get a list of snapshots that you can export to Amazon EC2.

" + "documentation":"

Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack operation to create new Amazon EC2 instances.

Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.

The export snapshot operation supports tag-based access control via resource tags applied to the resource identified by source snapshot name. For more information, see the Amazon Lightsail Developer Guide.

Use the get instance snapshots or get disk snapshots operations to get a list of snapshots that you can export to Amazon EC2.

" }, "GetActiveNames":{ "name":"GetActiveNames", @@ -1105,7 +1172,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the available automatic snapshots for an instance or disk. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Returns the available automatic snapshots for an instance or disk. For more information, see the Amazon Lightsail Developer Guide.

" }, "GetBlueprints":{ "name":"GetBlueprints", @@ -1126,6 +1193,73 @@ ], "documentation":"

Returns the list of available instance images, or blueprints. You can use a blueprint to create a new instance already running a specific operating system, as well as a preinstalled app or development stack. The software each instance is running depends on the blueprint image you choose.

Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.

" }, + "GetBucketAccessKeys":{ + "name":"GetBucketAccessKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBucketAccessKeysRequest"}, + "output":{"shape":"GetBucketAccessKeysResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the existing access key IDs for the specified Amazon Lightsail bucket.

This action does not return the secret access key value of an access key. You can get a secret access key only when you create it from the response of the CreateBucketAccessKey action. If you lose the secret access key, you must create a new access key.

" + }, + "GetBucketBundles":{ + "name":"GetBucketBundles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBucketBundlesRequest"}, + "output":{"shape":"GetBucketBundlesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the bundles that you can apply to an Amazon Lightsail bucket.

The bucket bundle specifies the monthly cost, storage quota, and data transfer quota for a bucket.

Use the UpdateBucketBundle action to update the bundle for a bucket.

" + }, + "GetBucketMetricData":{ + "name":"GetBucketMetricData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBucketMetricDataRequest"}, + "output":{"shape":"GetBucketMetricDataResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the data points of a specific metric for an Amazon Lightsail bucket.

Metrics report the utilization of a bucket. View and collect metric data regularly to monitor the number of objects stored in a bucket (including object versions) and the storage space used by those objects.

" + }, + "GetBuckets":{ + "name":"GetBuckets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBucketsRequest"}, + "output":{"shape":"GetBucketsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns information about one or more Amazon Lightsail buckets.

For more information about buckets, see Buckets in Amazon Lightsail in the Amazon Lightsail Developer Guide.

" + }, "GetBundles":{ "name":"GetBundles", "http":{ @@ -1408,7 +1542,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the list bundles that can be applied to you Amazon Lightsail content delivery network (CDN) distributions.

A distribution bundle specifies the monthly network transfer quota and monthly cost of your dsitribution.

" + "documentation":"

Returns the bundles that can be applied to your Amazon Lightsail content delivery network (CDN) distributions.

A distribution bundle specifies the monthly network transfer quota and monthly cost of your distribution.

" }, "GetDistributionLatestCacheReset":{ "name":"GetDistributionLatestCacheReset", @@ -1519,7 +1653,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the export snapshot record created as a result of the export snapshot operation.

An export snapshot record can be used to create a new Amazon EC2 instance and its related resources with the create cloud formation stack operation.

" + "documentation":"

Returns all export snapshot records created as a result of the export snapshot operation.

An export snapshot record can be used to create a new Amazon EC2 instance and its related resources with the CreateCloudFormationStack action.

" }, "GetInstance":{ "name":"GetInstance", @@ -1557,7 +1691,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns temporary SSH keys you can use to connect to a specific virtual private server, or instance.

The get instance access details operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Returns temporary SSH keys you can use to connect to a specific virtual private server, or instance.

The get instance access details operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Amazon Lightsail Developer Guide.

" }, "GetInstanceMetricData":{ "name":"GetInstanceMetricData", @@ -2184,7 +2318,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol.

The OpenInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol.

The OpenInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Amazon Lightsail Developer Guide.

" }, "PeerVpc":{ "name":"PeerVpc", @@ -2203,7 +2337,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Tries to peer the Lightsail VPC with the user's default VPC.

" + "documentation":"

Peers the Lightsail VPC with the user's default VPC.

" }, "PutAlarm":{ "name":"PutAlarm", @@ -2240,7 +2374,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol. This action also closes all currently open ports that are not included in the request. Include all of the ports and the protocols you want to open in your PutInstancePublicPortsrequest. Or use the OpenInstancePublicPorts action to open ports without closing currently open ports.

The PutInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol. This action also closes all currently open ports that are not included in the request. Include all of the ports and the protocols you want to open in your PutInstancePublicPorts request. Or use the OpenInstancePublicPorts action to open ports without closing currently open ports.

The PutInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Amazon Lightsail Developer Guide.

" }, "RebootInstance":{ "name":"RebootInstance", @@ -2259,7 +2393,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Restarts a specific instance.

The reboot instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Restarts a specific instance.

The reboot instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Amazon Lightsail Developer Guide.

" }, "RebootRelationalDatabase":{ "name":"RebootRelationalDatabase", @@ -2278,7 +2412,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Restarts a specific database in Amazon Lightsail.

The reboot relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Restarts a specific database in Amazon Lightsail.

The reboot relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.

" }, "RegisterContainerImage":{ "name":"RegisterContainerImage", @@ -2295,7 +2429,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Registers a container image to your Amazon Lightsail container service.

This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Lightsail Dev Guide.

" + "documentation":"

Registers a container image to your Amazon Lightsail container service.

This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Amazon Lightsail Developer Guide.

" }, "ReleaseStaticIp":{ "name":"ReleaseStaticIp", @@ -2371,6 +2505,23 @@ ], "documentation":"

Sets the IP address type for an Amazon Lightsail resource.

Use this action to enable dual-stack for a resource, which enables IPv4 and IPv6 for the specified resource. Alternately, you can use this action to disable dual-stack, and enable IPv4 only.

" }, + "SetResourceAccessForBucket":{ + "name":"SetResourceAccessForBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetResourceAccessForBucketRequest"}, + "output":{"shape":"SetResourceAccessForBucketResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Sets the Amazon Lightsail resources that can access the specified Lightsail bucket.

Lightsail buckets currently support setting access for Lightsail instances in the same AWS Region.

" + }, "StartInstance":{ "name":"StartInstance", "http":{ @@ -2388,7 +2539,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Starts a specific Amazon Lightsail instance from a stopped state. To restart an instance, use the reboot instance operation.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.

The start instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Starts a specific Amazon Lightsail instance from a stopped state. To restart an instance, use the reboot instance operation.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Amazon Lightsail Developer Guide.

The start instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Amazon Lightsail Developer Guide.

" }, "StartRelationalDatabase":{ "name":"StartRelationalDatabase", @@ -2407,7 +2558,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Starts a specific database from a stopped state in Amazon Lightsail. To restart a database, use the reboot relational database operation.

The start relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Starts a specific database from a stopped state in Amazon Lightsail. To restart a database, use the reboot relational database operation.

The start relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.

" }, "StopInstance":{ "name":"StopInstance", @@ -2426,7 +2577,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Stops a specific Amazon Lightsail instance that is currently running.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.

The stop instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Stops a specific Amazon Lightsail instance that is currently running.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Amazon Lightsail Developer Guide.

The stop instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Amazon Lightsail Developer Guide.

" }, "StopRelationalDatabase":{ "name":"StopRelationalDatabase", @@ -2445,7 +2596,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Stops a specific database that is currently running in Amazon Lightsail.

The stop relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Stops a specific database that is currently running in Amazon Lightsail.

The stop relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.

" }, "TagResource":{ "name":"TagResource", @@ -2464,7 +2615,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Adds one or more tags to the specified Amazon Lightsail resource. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see the Lightsail Dev Guide.

The tag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Adds one or more tags to the specified Amazon Lightsail resource. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see the Amazon Lightsail Developer Guide.

The tag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name. For more information, see the Amazon Lightsail Developer Guide.

" }, "TestAlarm":{ "name":"TestAlarm", @@ -2501,7 +2652,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Attempts to unpeer the Lightsail VPC from the user's default VPC.

" + "documentation":"

Unpeers the Lightsail VPC from the user's default VPC.

" }, "UntagResource":{ "name":"UntagResource", @@ -2520,7 +2671,41 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the specified set of tag keys and their values from the specified Amazon Lightsail resource.

The untag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes the specified set of tag keys and their values from the specified Amazon Lightsail resource.

The untag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name. For more information, see the Amazon Lightsail Developer Guide.

" + }, + "UpdateBucket":{ + "name":"UpdateBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBucketRequest"}, + "output":{"shape":"UpdateBucketResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Updates an existing Amazon Lightsail bucket.

Use this action to update the configuration of an existing bucket, such as versioning, public accessibility, and the AWS accounts that can access the bucket.

" + }, + "UpdateBucketBundle":{ + "name":"UpdateBucketBundle", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBucketBundleRequest"}, + "output":{"shape":"UpdateBucketBundleResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Updates the bundle, or storage plan, of an existing Amazon Lightsail bucket.

A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket. You can update a bucket's bundle only one time within a monthly AWS billing cycle. To determine if you can update a bucket's bundle, use the GetBuckets action. The ableToUpdateBundle parameter in the response will indicate whether you can currently update a bucket's bundle.

Update a bucket's bundle if it's consistently going over its storage space or data transfer quota, or if a bucket's usage is consistently in the lower range of its storage space or data transfer quota. Due to the unpredictable usage fluctuations that a bucket might experience, we strongly recommend that you update a bucket's bundle only as a long-term strategy, instead of as a short-term, monthly cost-cutting measure. Choose a bucket bundle that will provide the bucket with ample storage space and data transfer for a long time to come.

" }, "UpdateContainerService":{ "name":"UpdateContainerService", @@ -2555,7 +2740,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Updates an existing Amazon Lightsail content delivery network (CDN) distribution.

Use this action to update the configuration of your existing distribution

" + "documentation":"

Updates an existing Amazon Lightsail content delivery network (CDN) distribution.

Use this action to update the configuration of your existing distribution.

" }, "UpdateDistributionBundle":{ "name":"UpdateDistributionBundle", @@ -2592,7 +2777,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Updates a domain recordset after it is created.

The update domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Updates a domain recordset after it is created.

The update domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Amazon Lightsail Developer Guide.

" }, "UpdateLoadBalancerAttribute":{ "name":"UpdateLoadBalancerAttribute", @@ -2611,7 +2796,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Updates the specified attribute for a load balancer. You can only update one attribute at a time.

The update load balancer attribute operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Updates the specified attribute for a load balancer. You can only update one attribute at a time.

The update load balancer attribute operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Amazon Lightsail Developer Guide.

" }, "UpdateRelationalDatabase":{ "name":"UpdateRelationalDatabase", @@ -2630,7 +2815,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Allows the update of one or more attributes of a database in Amazon Lightsail.

Updates are applied immediately, or in cases where the updates could result in an outage, are applied during the database's predefined maintenance window.

The update relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Allows the update of one or more attributes of a database in Amazon Lightsail.

Updates are applied immediately, or in cases where the updates could result in an outage, are applied during the database's predefined maintenance window.

The update relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.

" }, "UpdateRelationalDatabaseParameters":{ "name":"UpdateRelationalDatabaseParameters", @@ -2649,7 +2834,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Allows the update of one or more parameters of a database in Amazon Lightsail.

Parameter updates don't cause outages; therefore, their application is not subject to the preferred maintenance window. However, there are two ways in which parameter updates are applied: dynamic or pending-reboot. Parameters marked with a dynamic apply type are applied immediately. Parameters marked with a pending-reboot apply type are applied only after the database is rebooted using the reboot relational database operation.

The update relational database parameters operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Allows the update of one or more parameters of a database in Amazon Lightsail.

Parameter updates don't cause outages; therefore, their application is not subject to the preferred maintenance window. However, there are two ways in which parameter updates are applied: dynamic or pending-reboot. Parameters marked with a dynamic apply type are applied immediately. Parameters marked with a pending-reboot apply type are applied only after the database is rebooted using the reboot relational database operation.

The update relational database parameters operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.

" } }, "shapes":{ @@ -2671,6 +2856,79 @@ "outbound" ] }, + "AccessKey":{ + "type":"structure", + "members":{ + "accessKeyId":{ + "shape":"IAMAccessKeyId", + "documentation":"

The ID of the access key.

" + }, + "secretAccessKey":{ + "shape":"NonEmptyString", + "documentation":"

The secret access key used to sign requests.

You should store the secret access key in a safe location. We recommend that you delete the access key if the secret access key is compromised.

" + }, + "status":{ + "shape":"StatusType", + "documentation":"

The status of the access key.

A status of Active means that the key is valid, while Inactive means it is not.

" + }, + "createdAt":{ + "shape":"IsoDate", + "documentation":"

The timestamp when the access key was created.

" + }, + "lastUsed":{ + "shape":"AccessKeyLastUsed", + "documentation":"

An object that describes the last time the access key was used.

This object does not include data in the response of a CreateBucketAccessKey action. If the access key has not been used, the region and serviceName values are N/A, and the lastUsedDate value is null.

" + } + }, + "documentation":"

Describes an access key for an Amazon Lightsail bucket.

Access keys grant full programmatic access to the specified bucket and its objects. You can have a maximum of two access keys per bucket. Use the CreateBucketAccessKey action to create an access key for a specific bucket. For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.

The secretAccessKey value is returned only in response to the CreateBucketAccessKey action. You can get a secret access key only when you first create an access key; you cannot get the secret access key later. If you lose the secret access key, you must create a new access key.

" + }, + "AccessKeyLastUsed":{ + "type":"structure", + "members":{ + "lastUsedDate":{ + "shape":"IsoDate", + "documentation":"

The date and time when the access key was most recently used.

This value is null if the access key has not been used.

" + }, + "region":{ + "shape":"string", + "documentation":"

The AWS Region where this access key was most recently used.

This value is N/A if the access key has not been used.

" + }, + "serviceName":{ + "shape":"string", + "documentation":"

The name of the AWS service with which this access key was most recently used.

This value is N/A if the access key has not been used.

" + } + }, + "documentation":"

Describes the last time an access key was used.

This object does not include data in the response of a CreateBucketAccessKey action.

" + }, + "AccessKeyList":{ + "type":"list", + "member":{"shape":"AccessKey"} + }, + "AccessReceiverList":{ + "type":"list", + "member":{"shape":"ResourceReceivingAccess"} + }, + "AccessRules":{ + "type":"structure", + "members":{ + "getObject":{ + "shape":"AccessType", + "documentation":"

Specifies the anonymous access to all objects in a bucket.

The following options can be specified:

  • public - Sets all objects in the bucket to public (read-only), making them readable by anyone in the world.

    If the getObject value is set to public, then all objects in the bucket default to public regardless of the allowPublicOverrides value.

  • private - Sets all objects in the bucket to private, making them readable only by you or anyone you give access to.

    If the getObject value is set to private, and the allowPublicOverrides value is set to true, then all objects in the bucket default to private unless they are configured with a public-read ACL. Individual objects with a public-read ACL are readable by anyone in the world.

" + }, + "allowPublicOverrides":{ + "shape":"boolean", + "documentation":"

A Boolean value that indicates whether the access control list (ACL) permissions that are applied to individual objects override the getObject option that is currently specified.

When this is true, you can use the PutObjectAcl Amazon S3 API action to set individual objects to public (read-only) using the public-read ACL, or to private using the private ACL.

" + } + }, + "documentation":"

Describes the anonymous access permissions for an Amazon Lightsail bucket and its objects.

For more information about bucket access permissions, see Understanding bucket permissions in Amazon Lightsail in the Amazon Lightsail Developer Guide.

" + }, + "AccessType":{ + "type":"string", + "enum":[ + "public", + "private" + ] + }, "AccountSetupInProgressException":{ "type":"structure", "members":{ @@ -2815,7 +3073,7 @@ "documentation":"

Indicates whether the alarm is enabled.

" } }, - "documentation":"

Describes an alarm.

An alarm is a way to monitor your Amazon Lightsail resource metrics. For more information, see Alarms in Amazon Lightsail.

" + "documentation":"

Describes an alarm.

An alarm is a way to monitor your Lightsail resource metrics. For more information, see Alarms in Amazon Lightsail.

" }, "AlarmState":{ "type":"string", @@ -3146,6 +3404,134 @@ "app" ] }, + "Bucket":{ + "type":"structure", + "members":{ + "resourceType":{ + "shape":"NonEmptyString", + "documentation":"

The Lightsail resource type of the bucket (for example, Bucket).

" + }, + "accessRules":{ + "shape":"AccessRules", + "documentation":"

An object that describes the access rules of the bucket.

" + }, + "arn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of the bucket.

" + }, + "bundleId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the bundle currently applied to the bucket.

A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.

Use the UpdateBucketBundle action to change the bundle of a bucket.

" + }, + "createdAt":{ + "shape":"IsoDate", + "documentation":"

The timestamp when the distribution was created.

" + }, + "url":{ + "shape":"NonEmptyString", + "documentation":"

The URL of the bucket.

" + }, + "location":{"shape":"ResourceLocation"}, + "name":{ + "shape":"BucketName", + "documentation":"

The name of the bucket.

" + }, + "supportCode":{ + "shape":"NonEmptyString", + "documentation":"

The support code for a bucket. Include this code in your email to support when you have questions about a Lightsail bucket. This code enables our support team to look up your Lightsail information more easily.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tag keys and optional values for the bucket. For more information, see Tags in Amazon Lightsail in the Amazon Lightsail Developer Guide.

" + }, + "objectVersioning":{ + "shape":"NonEmptyString", + "documentation":"

Indicates whether object versioning is enabled for the bucket.

The following options can be configured:

  • Enabled - Object versioning is enabled.

  • Suspended - Object versioning was previously enabled but is currently suspended. Existing object versions are retained.

  • NeverEnabled - Object versioning has never been enabled.

" + }, + "ableToUpdateBundle":{ + "shape":"boolean", + "documentation":"

Indicates whether the bundle that is currently applied to a bucket can be changed to another bundle.

You can update a bucket's bundle only one time within a monthly AWS billing cycle.

Use the UpdateBucketBundle action to change a bucket's bundle.

" + }, + "readonlyAccessAccounts":{ + "shape":"PartnerIdList", + "documentation":"

An array of strings that specify the AWS account IDs that have read-only access to the bucket.

" + }, + "resourcesReceivingAccess":{ + "shape":"AccessReceiverList", + "documentation":"

An array of objects that describe Lightsail instances that have access to the bucket.

Use the SetResourceAccessForBucket action to update the instances that have access to a bucket.

" + }, + "state":{ + "shape":"BucketState", + "documentation":"

An object that describes the state of the bucket.

" + } + }, + "documentation":"

Describes an Amazon Lightsail bucket.

" + }, + "BucketBundle":{ + "type":"structure", + "members":{ + "bundleId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the bundle.

" + }, + "name":{ + "shape":"NonEmptyString", + "documentation":"

The name of the bundle.

" + }, + "price":{ + "shape":"float", + "documentation":"

The monthly price of the bundle, in US dollars.

" + }, + "storagePerMonthInGb":{ + "shape":"integer", + "documentation":"

The storage size of the bundle, in GB.

" + }, + "transferPerMonthInGb":{ + "shape":"integer", + "documentation":"

The monthly network transfer quota of the bundle.

" + }, + "isActive":{ + "shape":"boolean", + "documentation":"

Indicates whether the bundle is active. Use for a new or existing bucket.

" + } + }, + "documentation":"

Describes the specifications of a bundle that can be applied to an Amazon Lightsail bucket.

A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.

" + }, + "BucketBundleList":{ + "type":"list", + "member":{"shape":"BucketBundle"} + }, + "BucketList":{ + "type":"list", + "member":{"shape":"Bucket"} + }, + "BucketMetricName":{ + "type":"string", + "enum":[ + "BucketSizeBytes", + "NumberOfObjects" + ] + }, + "BucketName":{ + "type":"string", + "max":54, + "min":3, + "pattern":"^[a-z0-9][a-z0-9-]{1,52}[a-z0-9]$" + }, + "BucketState":{ + "type":"structure", + "members":{ + "code":{ + "shape":"NonEmptyString", + "documentation":"

The state code of the bucket.

The following codes are possible:

  • OK - The bucket is in a running state.

  • Unknown - Creation of the bucket might have timed out. You might want to delete the bucket and create a new one.

" + }, + "message":{ + "shape":"string", + "documentation":"

A message that describes the state of the bucket.

" + } + }, + "documentation":"

Describes the state of an Amazon Lightsail bucket.

" + }, "Bundle":{ "type":"structure", "members":{ @@ -3347,7 +3733,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "supportCode":{ "shape":"string", @@ -3394,7 +3780,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" } }, "documentation":"

Describes an Amazon Lightsail SSL/TLS certificate.

" @@ -3465,7 +3851,7 @@ "documentation":"

A list of objects describing the destination service, which is AWS CloudFormation, and the Amazon Resource Name (ARN) of the AWS CloudFormation stack.

" } }, - "documentation":"

Describes a CloudFormation stack record created as a result of the create cloud formation stack operation.

A CloudFormation stack record provides information about the AWS CloudFormation stack used to create a new Amazon Elastic Compute Cloud instance from an exported Lightsail instance snapshot.

" + "documentation":"

Describes a CloudFormation stack record created as a result of the create cloud formation stack action.

A CloudFormation stack record provides information about the AWS CloudFormation stack used to create a new Amazon Elastic Compute Cloud instance from an exported Lightsail instance snapshot.

" }, "CloudFormationStackRecordList":{ "type":"list", @@ -3658,7 +4044,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "power":{ "shape":"ContainerServicePowerName", @@ -3807,7 +4193,7 @@ }, "successCodes":{ "shape":"string", - "documentation":"

The HTTP codes to use when checking for a successful response from a container. You can specify values between 200 and 499.

" + "documentation":"

The HTTP codes to use when checking for a successful response from a container. You can specify values between 200 and 499. You can specify multiple values (for example, 200,202) or a range of values (for example, 200-299).

" } }, "documentation":"

Describes the health check configuration of an Amazon Lightsail container service.

" @@ -4022,15 +4408,15 @@ }, "sourceResourceName":{ "shape":"string", - "documentation":"

The name of the source instance or disk from which the source automatic snapshot was created.

Constraint:

  • Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The name of the source instance or disk from which the source automatic snapshot was created.

Constraint:

" }, "restoreDate":{ "shape":"string", - "documentation":"

The date of the source automatic snapshot to copy. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The date of the source automatic snapshot to copy. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Amazon Lightsail Developer Guide.

" }, "useLatestRestorableAutoSnapshot":{ "shape":"boolean", - "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot of the specified source instance or disk.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot of the specified source instance or disk.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Amazon Lightsail Developer Guide.

" }, "targetSnapshotName":{ "shape":"ResourceName", @@ -4051,6 +4437,67 @@ } } }, + "CreateBucketAccessKeyRequest":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket that the new access key will belong to, and which it will grant access to.

" + } + } + }, + "CreateBucketAccessKeyResult":{ + "type":"structure", + "members":{ + "accessKey":{ + "shape":"AccessKey", + "documentation":"

An object that describes the access key that is created.

" + }, + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" + } + } + }, + "CreateBucketRequest":{ + "type":"structure", + "required":[ + "bucketName", + "bundleId" + ], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name for the bucket.

For more information about bucket names, see Bucket naming rules in Amazon Lightsail in the Amazon Lightsail Developer Guide.

" + }, + "bundleId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the bundle to use for the bucket.

A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.

Use the GetBucketBundles action to get a list of bundle IDs that you can specify.

Use the UpdateBucketBundle action to change the bundle after the bucket is created.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tag keys and optional values to add to the bucket during creation.

Use the TagResource action to tag the bucket after it's created.

" + }, + "enableObjectVersioning":{ + "shape":"boolean", + "documentation":"

A Boolean value that indicates whether to enable versioning of objects in the bucket.

For more information about versioning, see Enabling and suspending object versioning in a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.

" + } + } + }, + "CreateBucketResult":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"Bucket", + "documentation":"

An object that describes the bucket that is created.

" + }, + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" + } + } + }, "CreateCertificateRequest":{ "type":"structure", "required":[ @@ -4197,7 +4644,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the container service.

For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values to add to the container service during creation.

Use the TagResource action to tag a resource after it's created.

For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "publicDomainNames":{ "shape":"ContainerServicePublicDomains", @@ -4252,15 +4699,15 @@ }, "sourceDiskName":{ "shape":"string", - "documentation":"

The name of the source disk from which the source automatic snapshot was created.

Constraints:

  • This parameter cannot be defined together with the disk snapshot name parameter. The source disk name and disk snapshot name parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The name of the source disk from which the source automatic snapshot was created.

Constraints:

  • This parameter cannot be defined together with the disk snapshot name parameter. The source disk name and disk snapshot name parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.

" }, "restoreDate":{ "shape":"string", - "documentation":"

The date of the automatic snapshot to use for the new disk. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The date of the automatic snapshot to use for the new disk. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.

" }, "useLatestRestorableAutoSnapshot":{ "shape":"boolean", - "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.

" } } }, @@ -4508,7 +4955,7 @@ }, "userData":{ "shape":"string", - "documentation":"

You can create a launch script that configures a server with additional user data. For example, apt-get -y update.

Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu use apt-get, and FreeBSD uses pkg. For a complete list, see the Dev Guide.

" + "documentation":"

You can create a launch script that configures a server with additional user data. For example, apt-get -y update.

Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu use apt-get, and FreeBSD uses pkg. For a complete list, see the Amazon Lightsail Developer Guide.

" }, "keyPairName":{ "shape":"ResourceName", @@ -4528,15 +4975,15 @@ }, "sourceInstanceName":{ "shape":"string", - "documentation":"

The name of the source instance from which the source automatic snapshot was created.

Constraints:

  • This parameter cannot be defined together with the instance snapshot name parameter. The source instance name and instance snapshot name parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The name of the source instance from which the source automatic snapshot was created.

Constraints:

  • This parameter cannot be defined together with the instance snapshot name parameter. The source instance name and instance snapshot name parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.

" }, "restoreDate":{ "shape":"string", - "documentation":"

The date of the automatic snapshot to use for the new instance. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The date of the automatic snapshot to use for the new instance. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.

" }, "useLatestRestorableAutoSnapshot":{ "shape":"boolean", - "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.

" } } }, @@ -4581,7 +5028,7 @@ }, "userData":{ "shape":"string", - "documentation":"

A launch script you can create that configures a server with additional user data. For example, you might want to run apt-get -y update.

Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu use apt-get, and FreeBSD uses pkg. For a complete list, see the Dev Guide.

" + "documentation":"

A launch script you can create that configures a server with additional user data. For example, you might want to run apt-get -y update.

Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu use apt-get, and FreeBSD uses pkg. For a complete list, see the Amazon Lightsail Developer Guide.

" }, "keyPairName":{ "shape":"ResourceName", @@ -4925,6 +5372,55 @@ } } }, + "DeleteBucketAccessKeyRequest":{ + "type":"structure", + "required":[ + "bucketName", + "accessKeyId" + ], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket that the access key belongs to.

" + }, + "accessKeyId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the access key to delete.

Use the GetBucketAccessKeys action to get a list of access key IDs that you can specify.

" + } + } + }, + "DeleteBucketAccessKeyResult":{ + "type":"structure", + "members":{ + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" + } + } + }, + "DeleteBucketRequest":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket to delete.

Use the GetBuckets action to get a list of bucket names that you can specify.

" + }, + "forceDelete":{ + "shape":"boolean", + "documentation":"

A Boolean value that indicates whether to force delete the bucket.

You must force delete the bucket if it has one of the following conditions:

  • The bucket is the origin of a distribution.

  • The bucket has instances that were granted access to it using the SetResourceAccessForBucket action.

  • The bucket has objects.

  • The bucket has access keys.

Force deleting a bucket might impact other resources that rely on the bucket, such as instances, distributions, or software that use the issued access keys.

" + } + } + }, + "DeleteBucketResult":{ + "type":"structure", + "members":{ + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" + } + } + }, "DeleteCertificateRequest":{ "type":"structure", "required":["certificateName"], @@ -5432,7 +5928,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "addOns":{ "shape":"AddOnList", @@ -5477,7 +5973,7 @@ "deprecated":true } }, - "documentation":"

Describes a system disk or a block storage disk.

" + "documentation":"

Describes a block storage disk.

" }, "DiskInfo":{ "type":"structure", @@ -5556,7 +6052,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "sizeInGb":{ "shape":"integer", @@ -5647,7 +6143,7 @@ }, "isActive":{ "shape":"boolean", - "documentation":"

Indicates whether the bundle is active, and can be specified for a new distribution.

" + "documentation":"

Indicates whether the bundle is active, and can be specified for a new or existing distribution.

" } }, "documentation":"

Describes the specifications of a distribution bundle.

" @@ -5700,14 +6196,14 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "domainEntries":{ "shape":"DomainEntryList", "documentation":"

An array of key-value pairs containing information about the domain entries.

" } }, - "documentation":"

Describes a domain where you are storing recordsets in Lightsail.

" + "documentation":"

Describes a domain where you are storing recordsets.

" }, "DomainEntry":{ "type":"structure", @@ -6069,6 +6565,128 @@ } } }, + "GetBucketAccessKeysRequest":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket for which to return access keys.

" + } + } + }, + "GetBucketAccessKeysResult":{ + "type":"structure", + "members":{ + "accessKeys":{ + "shape":"AccessKeyList", + "documentation":"

An object that describes the access keys for the specified bucket.

" + } + } + }, + "GetBucketBundlesRequest":{ + "type":"structure", + "members":{ + "includeInactive":{ + "shape":"boolean", + "documentation":"

A Boolean value that indicates whether to include inactive (unavailable) bundles in the response.

" + } + } + }, + "GetBucketBundlesResult":{ + "type":"structure", + "members":{ + "bundles":{ + "shape":"BucketBundleList", + "documentation":"

An object that describes bucket bundles.

" + } + } + }, + "GetBucketMetricDataRequest":{ + "type":"structure", + "required":[ + "bucketName", + "metricName", + "startTime", + "endTime", + "period", + "statistics", + "unit" + ], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket for which to get metric data.

" + }, + "metricName":{ + "shape":"BucketMetricName", + "documentation":"

The metric for which you want to return information.

Valid bucket metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.

These bucket metrics are reported once per day.

  • BucketSizeBytes - The amount of data in bytes stored in a bucket. This value is calculated by summing the size of all objects in the bucket (including object versions), including the size of all parts for all incomplete multipart uploads to the bucket.

    Statistics: The most useful statistic is Maximum.

    Unit: The published unit is Bytes.

  • NumberOfObjects - The total number of objects stored in a bucket. This value is calculated by counting all objects in the bucket (including object versions) and the total number of parts for all incomplete multipart uploads to the bucket.

    Statistics: The most useful statistic is Average.

    Unit: The published unit is Count.

" + }, + "startTime":{ + "shape":"IsoDate", + "documentation":"

The timestamp indicating the earliest data to be returned.

" + }, + "endTime":{ + "shape":"IsoDate", + "documentation":"

The timestamp indicating the latest data to be returned.

" + }, + "period":{ + "shape":"MetricPeriod", + "documentation":"

The granularity, in seconds, of the returned data points.

Bucket storage metrics are reported once per day. Therefore, you should specify a period of 86400 seconds, which is the number of seconds in a day.

" + }, + "statistics":{ + "shape":"MetricStatisticList", + "documentation":"

The statistic for the metric.

The following statistics are available:

  • Minimum - The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum - The sum of all values submitted for the matching metric. You can use this statistic to determine the total volume of a metric.

  • Average - The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount - The count, or number, of data points used for the statistical calculation.

" + }, + "unit":{ + "shape":"MetricUnit", + "documentation":"

The unit for the metric data request.

Valid units depend on the metric data being requested. For the valid units with each available metric, see the metricName parameter.

" + } + } + }, + "GetBucketMetricDataResult":{ + "type":"structure", + "members":{ + "metricName":{ + "shape":"BucketMetricName", + "documentation":"

The name of the metric returned.

" + }, + "metricData":{ + "shape":"MetricDatapointList", + "documentation":"

An array of objects that describe the metric data returned.

" + } + } + }, + "GetBucketsRequest":{ + "type":"structure", + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket for which to return information.

When omitted, the response includes all of your buckets in the AWS Region where the request is made.

" + }, + "pageToken":{ + "shape":"string", + "documentation":"

The token to advance to the next page of results from your request.

To get a page token, perform an initial GetBuckets request. If your results are paginated, the response will return a next page token that you can specify as the page token in a subsequent request.

" + }, + "includeConnectedResources":{ + "shape":"boolean", + "documentation":"

A Boolean value that indicates whether to include Lightsail instances that were given access to the bucket using the SetResourceAccessForBucket action.

" + } + } + }, + "GetBucketsResult":{ + "type":"structure", + "members":{ + "buckets":{ + "shape":"BucketList", + "documentation":"

An array of objects that describe buckets.

" + }, + "nextPageToken":{ + "shape":"string", + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetBuckets request and specify the next page token using the pageToken parameter.

" + } + } + }, "GetBundlesRequest":{ "type":"structure", "members":{ @@ -6510,7 +7128,7 @@ "members":{ "distributionName":{ "shape":"ResourceName", - "documentation":"

The name of the distribution for which to return information.

Use the GetDistributions action to get a list of distribution names that you can specify.

When omitted, the response includes all of your distributions in the AWS Region where the request is made.

" + "documentation":"

The name of the distribution for which to return information.

When omitted, the response includes all of your distributions in the AWS Region where the request is made.

" }, "pageToken":{ "shape":"string", @@ -7495,6 +8113,13 @@ "type":"list", "member":{"shape":"HostKeyAttributes"} }, + "IAMAccessKeyId":{ + "type":"string", + "max":20, + "min":20, + "pattern":"^[A-Z0-9]{20}$", + "sensitive":true + }, "ImportKeyPairRequest":{ "type":"structure", "required":[ @@ -7570,7 +8195,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "blueprintId":{ "shape":"NonEmptyString", @@ -7946,7 +8571,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "state":{ "shape":"InstanceSnapshotState", @@ -8107,14 +8732,14 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "fingerprint":{ "shape":"Base64", "documentation":"

The RSA fingerprint of the key pair.

" } }, - "documentation":"

Describes the SSH key pair.

" + "documentation":"

Describes an SSH key pair.

" }, "KeyPairList":{ "type":"list", @@ -8201,7 +8826,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" } }, "documentation":"

Describes an Amazon Lightsail content delivery network (CDN) distribution.

" @@ -8235,7 +8860,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "dnsName":{ "shape":"NonEmptyString", @@ -8278,7 +8903,7 @@ "documentation":"

The IP address type of the load balancer.

The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.

" } }, - "documentation":"

Describes the Lightsail load balancer.

" + "documentation":"

Describes a load balancer.

" }, "LoadBalancerAttributeName":{ "type":"string", @@ -8360,7 +8985,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "loadBalancerName":{ "shape":"ResourceName", @@ -8948,7 +9573,14 @@ "CreateContainerServiceDeployment", "CreateContainerServiceRegistryLogin", "RegisterContainerImage", - "DeleteContainerImage" + "DeleteContainerImage", + "CreateBucket", + "DeleteBucket", + "CreateBucketAccessKey", + "DeleteBucketAccessKey", + "UpdateBucketBundle", + "UpdateBucket", + "SetResourceAccessForBucket" ] }, "Origin":{ @@ -8980,6 +9612,11 @@ "https-only" ] }, + "PartnerIdList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":10 + }, "PasswordData":{ "type":"structure", "members":{ @@ -9321,7 +9958,8 @@ "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", - "ap-northeast-2" + "ap-northeast-2", + "eu-north-1" ] }, "RegisterContainerImageRequest":{ @@ -9381,7 +10019,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "relationalDatabaseBlueprintId":{ "shape":"NonEmptyString", @@ -9698,7 +10336,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "engine":{ "shape":"NonEmptyString", @@ -9821,6 +10459,13 @@ "type":"string", "pattern":"^arn:(aws[^:]*):([a-zA-Z0-9-]+):([a-z0-9-]+):([0-9]+):([a-zA-Z]+)/([a-zA-Z0-9-]+)$" }, + "ResourceBucketAccess":{ + "type":"string", + "enum":[ + "allow", + "deny" + ] + }, "ResourceLocation":{ "type":"structure", "members":{ @@ -9843,6 +10488,20 @@ "type":"list", "member":{"shape":"ResourceName"} }, + "ResourceReceivingAccess":{ + "type":"structure", + "members":{ + "name":{ + "shape":"NonEmptyString", + "documentation":"

The name of the Lightsail instance.

" + }, + "resourceType":{ + "shape":"NonEmptyString", + "documentation":"

The Lightsail resource type (for example, Instance).

" + } + }, + "documentation":"

Describes an Amazon Lightsail instance that has access to a Lightsail bucket.

" + }, "ResourceRecord":{ "type":"structure", "members":{ @@ -9882,7 +10541,8 @@ "Alarm", "ContactMethod", "Distribution", - "Certificate" + "Certificate", + "Bucket" ] }, "RevocationReason":{"type":"string"}, @@ -9953,6 +10613,37 @@ } } }, + "SetResourceAccessForBucketRequest":{ + "type":"structure", + "required":[ + "resourceName", + "bucketName", + "access" + ], + "members":{ + "resourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the Lightsail instance for which to set bucket access. The instance must be in a running or stopped state.

" + }, + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket for which to set access to another Lightsail resource.

" + }, + "access":{ + "shape":"ResourceBucketAccess", + "documentation":"

The access setting.

The following access settings are available:

  • allow - Allows access to the bucket and its objects.

  • deny - Denies access to the bucket and its objects. Use this setting to remove access for a resource previously set to allow.

" + } + } + }, + "SetResourceAccessForBucketResult":{ + "type":"structure", + "members":{ + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" + } + } + }, "StartInstanceRequest":{ "type":"structure", "required":["instanceName"], @@ -10031,12 +10722,19 @@ "documentation":"

A Boolean value indicating whether the static IP is attached.

" } }, - "documentation":"

Describes the static IP.

" + "documentation":"

Describes a static IP.

" }, "StaticIpList":{ "type":"list", "member":{"shape":"StaticIp"} }, + "StatusType":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, "StopInstanceRequest":{ "type":"structure", "required":["instanceName"], @@ -10108,7 +10806,7 @@ "documentation":"

The value of the tag.

Constraints: Tag values accept a maximum of 256 letters, numbers, spaces in UTF-8, or the following characters: + - = . _ : / @

" } }, - "documentation":"

Describes a tag key and optional value assigned to an Amazon Lightsail resource.

For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + "documentation":"

Describes a tag key and optional value assigned to an Amazon Lightsail resource.

For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.

" }, "TagKey":{"type":"string"}, "TagKeyList":{ @@ -10244,6 +10942,67 @@ } } }, + "UpdateBucketBundleRequest":{ + "type":"structure", + "required":[ + "bucketName", + "bundleId" + ], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket for which to update the bundle.

" + }, + "bundleId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the new bundle to apply to the bucket.

Use the GetBucketBundles action to get a list of bundle IDs that you can specify.

" + } + } + }, + "UpdateBucketBundleResult":{ + "type":"structure", + "members":{ + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" + } + } + }, + "UpdateBucketRequest":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the bucket to update.

" + }, + "accessRules":{ + "shape":"AccessRules", + "documentation":"

An object that sets the public accessibility of objects in the specified bucket.

" + }, + "versioning":{ + "shape":"NonEmptyString", + "documentation":"

Specifies whether to enable or suspend versioning of objects in the bucket.

The following options can be specified:

  • Enabled - Enables versioning of objects in the specified bucket.

  • Suspended - Suspends versioning of objects in the specified bucket. Existing object versions are retained.

" + }, + "readonlyAccessAccounts":{ + "shape":"PartnerIdList", + "documentation":"

An array of strings to specify the AWS account IDs that can access the bucket.

You can give a maximum of 10 AWS accounts access to a bucket.

" + } + } + }, + "UpdateBucketResult":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"Bucket", + "documentation":"

An object that describes the bucket that is updated.

" + }, + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" + } + } + }, "UpdateContainerServiceRequest":{ "type":"structure", "required":["serviceName"], @@ -10483,5 +11242,5 @@ "string":{"type":"string"}, "timestamp":{"type":"timestamp"} }, - "documentation":"

Amazon Lightsail is the easiest way to get started with Amazon Web Services (AWS) for developers who need to build websites or web applications. It includes everything you need to launch your project quickly - instances (virtual private servers), container services, managed databases, SSD-based block storage, static IP addresses, load balancers, content delivery network (CDN) distributions, DNS management of registered domains, and resource snapshots (backups) - for a low, predictable monthly price.

You can manage your Lightsail resources using the Lightsail console, Lightsail API, AWS Command Line Interface (AWS CLI), or SDKs. For more information about Lightsail concepts and tasks, see the Lightsail Dev Guide.

This API Reference provides detailed information about the actions, data types, parameters, and errors of the Lightsail service. For more information about the supported AWS Regions, endpoints, and service quotas of the Lightsail service, see Amazon Lightsail Endpoints and Quotas in the AWS General Reference.

" + "documentation":"

Amazon Lightsail is the easiest way to get started with Amazon Web Services (AWS) for developers who need to build websites or web applications. It includes everything you need to launch your project quickly - instances (virtual private servers), container services, storage buckets, managed databases, SSD-based block storage, static IP addresses, load balancers, content delivery network (CDN) distributions, DNS management of registered domains, and resource snapshots (backups) - for a low, predictable monthly price.

You can manage your Lightsail resources using the Lightsail console, Lightsail API, AWS Command Line Interface (AWS CLI), or SDKs. For more information about Lightsail concepts and tasks, see the Amazon Lightsail Developer Guide.

This API Reference provides detailed information about the actions, data types, parameters, and errors of the Lightsail service. For more information about the supported AWS Regions, endpoints, and service quotas of the Lightsail service, see Amazon Lightsail Endpoints and Quotas in the AWS General Reference.

" } diff --git a/services/location/pom.xml b/services/location/pom.xml index 79de2bb37c3e..62cb1c3667cb 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git a/services/location/src/main/resources/codegen-resources/service-2.json b/services/location/src/main/resources/codegen-resources/service-2.json index 6ed3d75d1617..906bebbb83b9 100644 --- a/services/location/src/main/resources/codegen-resources/service-2.json +++ b/services/location/src/main/resources/codegen-resources/service-2.json @@ -27,9 +27,10 @@ {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection.

Currently not supported — Cross-account configurations, such as creating associations between a tracker resource in one account and a geofence collection in another account.

", + "documentation":"

Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection.

You can associate up to five geofence collections to each tracker resource.

Currently not supported — Cross-account configurations, such as creating associations between a tracker resource in one account and a geofence collection in another account.

", "endpoint":{"hostPrefix":"tracking."} }, "BatchDeleteDevicePositionHistory":{ @@ -86,7 +87,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Evaluates device positions against the geofence geometries from a given geofence collection. The evaluation determines if the device has entered or exited a geofenced area, which publishes ENTER or EXIT geofence events to Amazon EventBridge.

The last geofence that a device was observed within, if any, is tracked for 30 days after the most recent device position update

", + "documentation":"

Evaluates device positions against the geofence geometries from a given geofence collection.

This operation always returns an empty response because geofences are asynchronously evaluated. The evaluation determines if the device has entered or exited a geofenced area, and then publishes one of the following events to Amazon EventBridge:

  • ENTER if Amazon Location determines that the tracked device has entered a geofenced area.

  • EXIT if Amazon Location determines that the tracked device has exited a geofenced area.

The last geofence that a device was observed within is tracked for 30 days after the most recent device position update.

", "endpoint":{"hostPrefix":"geofencing."} }, "BatchGetDevicePosition":{ @@ -105,7 +106,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

A batch request to retrieve all device positions.

", + "documentation":"

Lists the latest device positions for requested devices.

", "endpoint":{"hostPrefix":"tracking."} }, "BatchPutGeofence":{ @@ -162,7 +163,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Calculates a route given the following required parameters: DeparturePostiton and DestinationPosition. Requires that you first create aroute calculator resource

By default, a request that doesn't specify a departure time uses the best time of day to travel with the best traffic conditions when calculating the route.

Additional options include:

  • Specifying a departure time using either DepartureTime or DepartureNow. This calculates a route based on predictive traffic data at the given time.

    You can't specify both DepartureTime and DepartureNow in a single request. Specifying both parameters returns an error message.

  • Specifying a travel mode using TravelMode. This lets you specify additional route preference such as CarModeOptions if traveling by Car, or TruckModeOptions if traveling by Truck.

", + "documentation":"

Calculates a route given the following required parameters: DeparturePosition and DestinationPosition. Requires that you first create a route calculator resource.

By default, a request that doesn't specify a departure time uses the best time of day to travel with the best traffic conditions when calculating the route.

Additional options include:

  • Specifying a departure time using either DepartureTime or DepartureNow. This calculates a route based on predictive traffic data at the given time.

    You can't specify both DepartureTime and DepartureNow in a single request. Specifying both parameters returns an error message.

  • Specifying a travel mode using TravelMode. This lets you specify an additional route preference such as CarModeOptions if traveling by Car, or TruckModeOptions if traveling by Truck.

", "endpoint":{"hostPrefix":"routes."} }, "CreateGeofenceCollection":{ @@ -627,7 +628,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists the latest device positions for requested devices.

", + "documentation":"

A batch request to retrieve all device positions.

", "endpoint":{"hostPrefix":"tracking."} }, "ListGeofenceCollections":{ @@ -737,7 +738,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns the tags for the specified Amazon Location Service resource.

", + "documentation":"

Returns a list of tags that are applied to the specified Amazon Location resource.

", "endpoint":{"hostPrefix":"metadata."} }, "ListTrackerConsumers":{ @@ -851,7 +852,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Assigns one or more tags (key-value pairs) to the specified Amazon Location Service resource.

 <p>Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.</p> <p>Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.</p> <p>You can use the <code>TagResource</code> action with an Amazon Location Service resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the tags already associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. </p> <p>You can associate as many as 50 tags with a resource.</p> 
", + "documentation":"

Assigns one or more tags (key-value pairs) to the specified Amazon Location Service resource.

 <p>Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.</p> <p>You can use the <code>TagResource</code> operation with an Amazon Location Service resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the tags already associated with the resource. If you specify a tag key that's already associated with the resource, the new tag value that you specify replaces the previous value for that tag. </p> <p>You can associate up to 50 tags with a resource.</p> 
", "endpoint":{"hostPrefix":"metadata."} }, "UntagResource":{ @@ -870,9 +871,109 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Removes one or more tags from the specified Amazon Location Service resource.

", + "documentation":"

Removes one or more tags from the specified Amazon Location resource.

", "endpoint":{"hostPrefix":"metadata."}, "idempotent":true + }, + "UpdateGeofenceCollection":{ + "name":"UpdateGeofenceCollection", + "http":{ + "method":"PATCH", + "requestUri":"/geofencing/v0/collections/{CollectionName}", + "responseCode":200 + }, + "input":{"shape":"UpdateGeofenceCollectionRequest"}, + "output":{"shape":"UpdateGeofenceCollectionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the specified properties of a given geofence collection.

", + "endpoint":{"hostPrefix":"geofencing."}, + "idempotent":true + }, + "UpdateMap":{ + "name":"UpdateMap", + "http":{ + "method":"PATCH", + "requestUri":"/maps/v0/maps/{MapName}", + "responseCode":200 + }, + "input":{"shape":"UpdateMapRequest"}, + "output":{"shape":"UpdateMapResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the specified properties of a given map resource.

", + "endpoint":{"hostPrefix":"maps."}, + "idempotent":true + }, + "UpdatePlaceIndex":{ + "name":"UpdatePlaceIndex", + "http":{ + "method":"PATCH", + "requestUri":"/places/v0/indexes/{IndexName}", + "responseCode":200 + }, + "input":{"shape":"UpdatePlaceIndexRequest"}, + "output":{"shape":"UpdatePlaceIndexResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the specified properties of a given place index resource.

", + "endpoint":{"hostPrefix":"places."}, + "idempotent":true + }, + "UpdateRouteCalculator":{ + "name":"UpdateRouteCalculator", + "http":{ + "method":"PATCH", + "requestUri":"/routes/v0/calculators/{CalculatorName}", + "responseCode":200 + }, + "input":{"shape":"UpdateRouteCalculatorRequest"}, + "output":{"shape":"UpdateRouteCalculatorResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the specified properties for a given route calculator resource.

", + "endpoint":{"hostPrefix":"routes."}, + "idempotent":true + }, + "UpdateTracker":{ + "name":"UpdateTracker", + "http":{ + "method":"PATCH", + "requestUri":"/tracking/v0/trackers/{TrackerName}", + "responseCode":200 + }, + "input":{"shape":"UpdateTrackerRequest"}, + "output":{"shape":"UpdateTrackerResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the specified properties of a given tracker resource.

", + "endpoint":{"hostPrefix":"tracking."}, + "idempotent":true } }, "shapes":{ @@ -885,7 +986,7 @@ "locationName":"message" } }, - "documentation":"

The request was denied due to insufficient access or permission. Check with an administrator to verify your permissions.

", + "documentation":"

The request was denied because of insufficient access or permissions. Check with an administrator to verify your permissions.

", "error":{ "httpStatusCode":403, "senderFault":true @@ -1461,11 +1562,11 @@ "members":{ "Legs":{ "shape":"LegList", - "documentation":"

Contains details about each path between a pair of positions included along a route such as: StartPosition, EndPosition, Distance, DurationSeconds, Geometry, and Steps. The number of legs returned corresponds to one less than the total number of positions in the request.

For example, a route with a departure position and destination position returns one leg with the positions snapped to a nearby road:

  • The StartPosition is the departure position.

  • The EndPosition is the destination position.

A route with a waypoint between the departure and destination position returns two legs with the positions snapped to a nearby road.:

  • Leg 1: The StartPosition is the departure position . The EndPosition is the waypoint positon.

  • Leg 2: The StartPosition is the waypoint position. The EndPosition is the destination position.

" + "documentation":"

Contains details about each path between a pair of positions included along a route such as: StartPosition, EndPosition, Distance, DurationSeconds, Geometry, and Steps. The number of legs returned corresponds to one fewer than the total number of positions in the request.

For example, a route with a departure position and destination position returns one leg with the positions snapped to a nearby road:

  • The StartPosition is the departure position.

  • The EndPosition is the destination position.

A route with a waypoint between the departure and destination position returns two legs with the positions snapped to a nearby road:

  • Leg 1: The StartPosition is the departure position. The EndPosition is the waypoint position.

  • Leg 2: The StartPosition is the waypoint position. The EndPosition is the destination position.

" }, "Summary":{ "shape":"CalculateRouteSummary", - "documentation":"

Contains information about the whole route, such as: RouteBBox, DataSource, Distance, DistanceUnit, and DurationSeconds

" + "documentation":"

Contains information about the whole route, such as: RouteBBox, DataSource, Distance, DistanceUnit, and DurationSeconds.

" } }, "documentation":"

Returns the result of the route calculation. Metadata includes legs and route summary.

" @@ -1498,7 +1599,7 @@ }, "RouteBBox":{ "shape":"BoundingBox", - "documentation":"

Specifies a geographical box surrounding a route. Used to zoom into a route when displaying it in a map. For example, [min x, min y, max x, max y]

The first 2 bbox parameters describe the lower southwest corner:

  • The first bbox position is the X coordinate or longitude of the lower southwest corner.

  • The second bbox position is the Y coordinate or latitude of the lower southwest corner.

The next 2 bbox parameters describe the upper northeast corner:

  • The third bbox position is the X coordinate, or longitude of the upper northeast corner.

  • The fourth bbox position is the Y coordinate, or longitude of the upper northeast corner.

" + "documentation":"

Specifies a geographical box surrounding a route. Used to zoom into a route when displaying it in a map. For example, [min x, min y, max x, max y].

The first 2 bbox parameters describe the lower southwest corner:

  • The first bbox position is the X coordinate or longitude of the lower southwest corner.

  • The second bbox position is the Y coordinate or latitude of the lower southwest corner.

The next 2 bbox parameters describe the upper northeast corner:

  • The third bbox position is the X coordinate, or longitude of the upper northeast corner.

  • The fourth bbox position is the Y coordinate, or longitude of the upper northeast corner.

" } }, "documentation":"

A summary of the calculated route.

" @@ -1544,7 +1645,7 @@ "locationName":"message" } }, - "documentation":"

The request was unsuccessful due to a conflict.

", + "documentation":"

The request was unsuccessful because of a conflict.

", "error":{ "httpStatusCode":409, "senderFault":true @@ -1660,7 +1761,7 @@ }, "MapArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the map resource. Used when you need to specify a resource across all AWS.

  • Format example: arn:aws:geo:region:account-id:maps/ExampleMap

" + "documentation":"

The Amazon Resource Name (ARN) for the map resource. Used to specify a resource across all AWS.

  • Format example: arn:aws:geo:region:account-id:maps/ExampleMap

" }, "MapName":{ "shape":"ResourceName", @@ -1678,11 +1779,11 @@ "members":{ "DataSource":{ "shape":"String", - "documentation":"

Specifies the data provider of geospatial data.

This field is case-sensitive. Enter the valid values as shown. For example, entering HERE will return an error.

Valid values include:

  • Esri

  • Here

    Place index resources using HERE as a data provider can't be used to store results for locations in Japan. For more information, see the AWS Service Terms for Amazon Location Service.

For additional details on data providers, see the Amazon Location Service data providers page.

" + "documentation":"

Specifies the data provider of geospatial data.

This field is case-sensitive. Enter the valid values as shown. For example, entering HERE returns an error.

Valid values include:

For additional information, see Data providers in the Amazon Location Service Developer Guide.

" }, "DataSourceConfiguration":{ "shape":"DataSourceConfiguration", - "documentation":"

Specifies the data storage option for requesting Places.

" + "documentation":"

Specifies the data storage option for requesting Places.

" }, "Description":{ "shape":"ResourceDescription", @@ -1716,7 +1817,7 @@ }, "IndexArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across all AWS.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" + "documentation":"

The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" }, "IndexName":{ "shape":"ResourceName", @@ -1738,7 +1839,7 @@ }, "DataSource":{ "shape":"String", - "documentation":"

Specifies the data provider of traffic and road network data.

This field is case-sensitive. Enter the valid values as shown. For example, entering HERE returns an error.

Valid Values: Esri | Here

For more information about data providers, see Amazon Location Service data providers.

" + "documentation":"

Specifies the data provider of traffic and road network data.

This field is case-sensitive. Enter the valid values as shown. For example, entering HERE returns an error.

Valid values include:

For additional information, see Data providers in the Amazon Location Service Developer Guide.

" }, "Description":{ "shape":"ResourceDescription", @@ -1836,10 +1937,10 @@ "members":{ "IntendedUse":{ "shape":"IntendedUse", - "documentation":"

Specifies how the results of an operation will be stored by the caller.

Valid values include:

  • SingleUse specifies that the results won't be stored.

  • Storage specifies that the result can be cached or stored in a database.

    Place index resources using HERE as a data provider can't be configured to store results for locations in Japan when choosing Storage for the IntendedUse parameter.

Default value: SingleUse

" + "documentation":"

Specifies how the results of an operation will be stored by the caller.

Valid values include:

  • SingleUse specifies that the results won't be stored.

  • Storage specifies that the result can be cached or stored in a database.

Default value: SingleUse

" } }, - "documentation":"

Specifies the data storage option chosen for requesting Places.

" + "documentation":"

Specifies the data storage option chosen for requesting Places.

When using Amazon Location Places:

  • If using HERE Technologies as a data provider, you can't store results for locations in Japan by setting IntendedUse to Storage.

  • Under the MobileAssetTracking or MobileAssetManagement pricing plan, you can't store results from your place index resources by setting IntendedUse to Storage. This returns a validation exception error.

For more information, see the AWS Service Terms for Amazon Location Service.

" }, "DeleteGeofenceCollectionRequest":{ "type":"structure", @@ -2030,7 +2131,7 @@ }, "MapArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the map resource. Used when you need to specify a resource across all AWS.

  • Format example: arn:aws:geo:region:account-id:maps/ExampleMap

" + "documentation":"

The Amazon Resource Name (ARN) for the map resource. Used to specify a resource across all AWS.

  • Format example: arn:aws:geo:region:account-id:maps/ExampleMap

" }, "MapName":{ "shape":"ResourceName", @@ -2093,7 +2194,7 @@ }, "IndexArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across all AWS.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" + "documentation":"

The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" }, "IndexName":{ "shape":"ResourceName", @@ -2497,7 +2598,7 @@ "members":{ "FontStack":{ "shape":"String", - "documentation":"

A comma-separated list of fonts to load glyphs from in order of preference.. For example, Noto Sans, Arial Unicode.

", + "documentation":"

A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode.

Valid fonts for Esri styles:

  • VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu Medium | Ubuntu Italic | Ubuntu Regular | Ubuntu Bold

  • VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu Regular | Ubuntu Light | Ubuntu Bold

  • VectorEsriTopographic – Noto Sans Italic | Noto Sans Regular | Noto Sans Bold | Noto Serif Regular | Roboto Condensed Light Italic

  • VectorEsriStreets – Arial Regular | Arial Italic | Arial Bold

  • VectorEsriNavigation – Arial Regular | Arial Italic | Arial Bold

Valid fonts for HERE Technologies styles:

  • VectorHereBerlin – Fira GO Regular | Fira GO Bold

", "location":"uri", "locationName":"FontStack" }, @@ -2735,7 +2836,7 @@ "documentation":"

Contains a list of steps, which represent subsections of a leg. Each step provides instructions for how to move to the next step in the leg such as the step's start position, end position, travel distance, travel duration, and geometry offset.

" } }, - "documentation":"

Contains the calculated route's details for each path between a pair of positions. The number of legs returned corresponds to one less than the total number of positions in the request.

For example, a route with a departure position and destination position returns one leg with the positions snapped to a nearby road:

  • The StartPosition is the departure position.

  • The EndPosition is the destination position.

A route with a waypoint between the departure and destination position returns two legs with the positions snapped to a nearby road.:

  • Leg 1: The StartPosition is the departure position . The EndPosition is the waypoint positon.

  • Leg 2: The StartPosition is the waypoint position. The EndPosition is the destination position.

" + "documentation":"

Contains the calculated route's details for each path between a pair of positions. The number of legs returned corresponds to one fewer than the total number of positions in the request.

For example, a route with a departure position and destination position returns one leg with the positions snapped to a nearby road:

  • The StartPosition is the departure position.

  • The EndPosition is the destination position.

A route with a waypoint between the departure and destination position returns two legs with the positions snapped to a nearby road:

  • Leg 1: The StartPosition is the departure position. The EndPosition is the waypoint position.

  • Leg 2: The StartPosition is the waypoint position. The EndPosition is the destination position.

" }, "LegDistanceDouble":{ "type":"double", @@ -3215,7 +3316,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.

  • Format example: arn:aws:geo:region:account-id:resourcetype/ExampleResource

", "location":"uri", "locationName":"ResourceArn" } @@ -3226,7 +3327,7 @@ "members":{ "Tags":{ "shape":"TagMap", - "documentation":"

The mapping from tag key to tag value for each tag associated with the specified resource.

" + "documentation":"

Tags that have been applied to the specified resource. Tags are mapped from the tag key to the tag value: \"TagKey\" : \"TagValue\".

  • Format example: {\"tag1\" : \"value1\", \"tag2\" : \"value2\"}

" } } }, @@ -3350,7 +3451,7 @@ "members":{ "Style":{ "shape":"MapStyle", - "documentation":"

Specifies the map style selected from an available data provider.

Valid styles: RasterEsriImagery, VectorEsriStreets, VectorEsriTopographic, VectorEsriNavigation, VectorEsriDarkGrayCanvas, VectorEsriLightGrayCanvas, VectorHereBerlin.

When using HERE as your data provider, and selecting the Style VectorHereBerlin, you may not use HERE Maps for Asset Management. See the AWS Service Terms for Amazon Location Service.

" + "documentation":"

Specifies the map style selected from an available data provider. For additional information on each map style and to preview each map style, see Esri map styles and HERE map styles.

Valid Esri styles:

  • VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A vector basemap with a dark gray, neutral background with minimal colors, labels, and features that's designed to draw attention to your thematic content.

  • RasterEsriImagery – The Esri Imagery map style. A raster basemap that provides one meter or better satellite and aerial imagery in many parts of the world and lower resolution satellite imagery worldwide.

  • VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style, which provides a detailed vector basemap with a light gray, neutral background style with minimal colors, labels, and features that's designed to draw attention to your thematic content.

  • VectorEsriTopographic – The Esri Light map style, which provides a detailed vector basemap with a classic Esri map style.

  • VectorEsriStreets – The Esri World Streets map style, which provides a detailed vector basemap for the world symbolized with a classic Esri street map style. The vector tile layer is similar in content and style to the World Street Map raster map.

  • VectorEsriNavigation – The Esri World Navigation map style, which provides a detailed basemap for the world symbolized with a custom navigation map style that's designed for use during the day in mobile devices.

Valid HERE Technologies styles:

  • VectorHereBerlin – The HERE Berlin map style is a high contrast detailed base map of the world that blends 3D and 2D rendering.

    When using HERE as your data provider, and selecting the Style VectorHereBerlin, you may not use HERE Technologies maps for Asset Management. See the AWS Service Terms for Amazon Location Service.

" } }, "documentation":"

Specifies the map tile style selected from an available provider.

" @@ -3692,6 +3793,23 @@ }, "documentation":"

A summary of the geocoding request sent using SearchPlaceIndexForText.

" }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

A message with the reason for the service quota exceeded exception error.

", + "locationName":"message" + } + }, + "documentation":"

The operation was denied because the request would exceed the maximum quota set for Amazon Location Service.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, "Step":{ "type":"structure", "required":[ @@ -3782,13 +3900,13 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to update.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to update.

  • Format example: arn:aws:geo:region:account-id:resourcetype/ExampleResource

", "location":"uri", "locationName":"ResourceArn" }, "Tags":{ "shape":"TagMap", - "documentation":"

The mapping from tag key to tag value for each tag associated with the specified resource.

" + "documentation":"

Tags that have been applied to the specified resource. Tags are mapped from the tag key to the tag value: \"TagKey\" : \"TagValue\".

  • Format example: {\"tag1\" : \"value1\", \"tag2\" : \"value2\"}

" } } }, @@ -3812,7 +3930,7 @@ "locationName":"message" } }, - "documentation":"

The request was denied due to request throttling.

", + "documentation":"

The request was denied because of request throttling.

", "error":{ "httpStatusCode":429, "senderFault":true @@ -3902,13 +4020,13 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the resource from which you want to remove tags.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource from which you want to remove tags.

  • Format example: arn:aws:geo:region:account-id:resourcetype/ExampleResource

", "location":"uri", "locationName":"ResourceArn" }, "TagKeys":{ "shape":"TagKeys", - "documentation":"

The list of tag keys to remove from the resource.

", + "documentation":"

The list of tag keys to remove from the specified resource.

", "location":"querystring", "locationName":"tagKeys" } @@ -3919,6 +4037,228 @@ "members":{ } }, + "UpdateGeofenceCollectionRequest":{ + "type":"structure", + "required":["CollectionName"], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

The name of the geofence collection to update.

", + "location":"uri", + "locationName":"CollectionName" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the geofence collection.

" + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

Updates the pricing plan for the geofence collection.

For more information about each pricing plan option restrictions, see Amazon Location Service pricing.

" + }, + "PricingPlanDataSource":{ + "shape":"String", + "documentation":"

Updates the data provider for the geofence collection.

A required value for the following pricing plans: MobileAssetTracking | MobileAssetManagement

For more information about data providers and pricing plans, see the Amazon Location Service product page.

This can only be updated when updating the PricingPlan in the same request.

Amazon Location Service uses PricingPlanDataSource to calculate billing for your geofence collection. Your data won't be shared with the data provider, and will remain in your AWS account and Region unless you move it.

" + } + } + }, + "UpdateGeofenceCollectionResponse":{ + "type":"structure", + "required":[ + "CollectionArn", + "CollectionName", + "UpdateTime" + ], + "members":{ + "CollectionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the updated geofence collection. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollection

" + }, + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

The name of the updated geofence collection.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

The time when the geofence collection was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

" + } + } + }, + "UpdateMapRequest":{ + "type":"structure", + "required":["MapName"], + "members":{ + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the map resource.

" + }, + "MapName":{ + "shape":"ResourceName", + "documentation":"

The name of the map resource to update.

", + "location":"uri", + "locationName":"MapName" + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

Updates the pricing plan for the map resource.

For more information about each pricing plan option restrictions, see Amazon Location Service pricing.

" + } + } + }, + "UpdateMapResponse":{ + "type":"structure", + "required":[ + "MapArn", + "MapName", + "UpdateTime" + ], + "members":{ + "MapArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the updated map resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:maps/ExampleMap

" + }, + "MapName":{ + "shape":"ResourceName", + "documentation":"

The name of the updated map resource.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the map resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + } + } + }, + "UpdatePlaceIndexRequest":{ + "type":"structure", + "required":["IndexName"], + "members":{ + "DataSourceConfiguration":{ + "shape":"DataSourceConfiguration", + "documentation":"

Updates the data storage option for the place index resource.

" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the place index resource.

" + }, + "IndexName":{ + "shape":"ResourceName", + "documentation":"

The name of the place index resource to update.

", + "location":"uri", + "locationName":"IndexName" + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

Updates the pricing plan for the place index resource.

For more information about each pricing plan option restrictions, see Amazon Location Service pricing.

" + } + } + }, + "UpdatePlaceIndexResponse":{ + "type":"structure", + "required":[ + "IndexArn", + "IndexName", + "UpdateTime" + ], + "members":{ + "IndexArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the updated place index resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

" + }, + "IndexName":{ + "shape":"ResourceName", + "documentation":"

The name of the updated place index resource.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the place index resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + } + } + }, + "UpdateRouteCalculatorRequest":{ + "type":"structure", + "required":["CalculatorName"], + "members":{ + "CalculatorName":{ + "shape":"ResourceName", + "documentation":"

The name of the route calculator resource to update.

", + "location":"uri", + "locationName":"CalculatorName" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the route calculator resource.

" + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

Updates the pricing plan for the route calculator resource.

For more information about each pricing plan option restrictions, see Amazon Location Service pricing.

" + } + } + }, + "UpdateRouteCalculatorResponse":{ + "type":"structure", + "required":[ + "CalculatorArn", + "CalculatorName", + "UpdateTime" + ], + "members":{ + "CalculatorArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the updated route calculator resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:route-calculator/ExampleCalculator

" + }, + "CalculatorName":{ + "shape":"ResourceName", + "documentation":"

The name of the updated route calculator resource.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the route calculator was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + } + } + }, + "UpdateTrackerRequest":{ + "type":"structure", + "required":["TrackerName"], + "members":{ + "Description":{ + "shape":"ResourceDescription", + "documentation":"

Updates the description for the tracker resource.

" + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

Updates the pricing plan for the tracker resource.

For more information about each pricing plan option restrictions, see Amazon Location Service pricing.

" + }, + "PricingPlanDataSource":{ + "shape":"String", + "documentation":"

Updates the data provider for the tracker resource.

A required value for the following pricing plans: MobileAssetTracking | MobileAssetManagement

For more information about data providers and pricing plans, see the Amazon Location Service product page.

This can only be updated when updating the PricingPlan in the same request.

Amazon Location Service uses PricingPlanDataSource to calculate billing for your tracker resource. Your data won't be shared with the data provider, and will remain in your AWS account and Region unless you move it.

" + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The name of the tracker resource to update.

", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "UpdateTrackerResponse":{ + "type":"structure", + "required":[ + "TrackerArn", + "TrackerName", + "UpdateTime" + ], + "members":{ + "TrackerArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the updated tracker resource. Used to specify a resource across AWS.

  • Format example: arn:aws:geo:region:account-id:tracker/ExampleTracker

" + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

The name of the updated tracker resource.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp for when the tracker resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

" + } + } + }, "ValidationException":{ "type":"structure", "required":[ diff --git a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 02a9b6fe74ee..98776e868316 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutmetrics/pom.xml b/services/lookoutmetrics/pom.xml index 38c53acb92d5..16306462fa8b 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index 058335e7cd66..db8fdc7acef6 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index f0fdbb68bfcd..ec49b5c0d3c6 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index 9e62565100ac..661934b3876e 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT macie AWS Java SDK :: Services :: Macie diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index 5ab7252473d3..55775827effb 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 
2.17.16-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index 8381374c8312..1d4b339c17c6 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index 0bd7cdcfe7c2..bb0e753e4a4a 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index 290941101c8d..a1809be00d1c 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 781cc1372291..b0844e9085ca 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index bfe6c18e5551..1833aede1a22 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service 
diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index 8fe58e1289f2..e2689e78fae7 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index 4b6bce823419..06f8285e8513 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index a86be86e3ad1..d437e8d0b719 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -3064,6 +3064,14 @@ "RAW" ] }, + "CopyProtectionAction": { + "type": "string", + "documentation": "The action to take on copy and redistribution control XDS packets. If you select PASSTHROUGH, packets will not be changed. If you select STRIP, any packets will be removed in output captions.", + "enum": [ + "PASSTHROUGH", + "STRIP" + ] + }, "CreateJobRequest": { "type": "structure", "members": { @@ -4553,6 +4561,22 @@ } } }, + "ExtendedDataServices": { + "type": "structure", + "members": { + "CopyProtectionAction": { + "shape": "CopyProtectionAction", + "locationName": "copyProtectionAction", + "documentation": "The action to take on copy and redistribution control XDS packets. If you select PASSTHROUGH, packets will not be changed. If you select STRIP, any packets will be removed in output captions." + }, + "VchipAction": { + "shape": "VchipAction", + "locationName": "vchipAction", + "documentation": "The action to take on content advisory XDS packets. 
If you select PASSTHROUGH, packets will not be changed. If you select STRIP, any packets will be removed in output captions." + } + }, + "documentation": "Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h Content Advisory." + }, "F4vMoovPlacement": { "type": "string", "documentation": "If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning of the archive as required for progressive downloading. Otherwise it is placed normally at the end.", @@ -6866,6 +6890,11 @@ "locationName": "esam", "documentation": "Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion, you can ignore these settings." }, + "ExtendedDataServices": { + "shape": "ExtendedDataServices", + "locationName": "extendedDataServices", + "documentation": "Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h Content Advisory." + }, "Inputs": { "shape": "__listOfInput", "locationName": "inputs", @@ -7022,6 +7051,11 @@ "locationName": "esam", "documentation": "Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion, you can ignore these settings." }, + "ExtendedDataServices": { + "shape": "ExtendedDataServices", + "locationName": "extendedDataServices", + "documentation": "Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h Content Advisory." + }, "Inputs": { "shape": "__listOfInputTemplate", "locationName": "inputs", @@ -10514,6 +10548,14 @@ "HARD" ] }, + "VchipAction": { + "type": "string", + "documentation": "The action to take on content advisory XDS packets. If you select PASSTHROUGH, packets will not be changed. 
If you select STRIP, any packets will be removed in output captions.", + "enum": [ + "PASSTHROUGH", + "STRIP" + ] + }, "VideoCodec": { "type": "string", "documentation": "Type of video codec", diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index 72a172405543..486de5b78a9a 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 medialive diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json b/services/medialive/src/main/resources/codegen-resources/service-2.json index 434030df224e..6159bafacd49 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -3112,6 +3112,26 @@ "USE_CONFIGURED" ] }, + "AudioHlsRenditionSelection": { + "type": "structure", + "members": { + "GroupId": { + "shape": "__stringMin1", + "locationName": "groupId", + "documentation": "Specifies the GROUP-ID in the #EXT-X-MEDIA tag of the target HLS audio rendition." + }, + "Name": { + "shape": "__stringMin1", + "locationName": "name", + "documentation": "Specifies the NAME in the #EXT-X-MEDIA tag of the target HLS audio rendition." 
+ } + }, + "documentation": "Audio Hls Rendition Selection", + "required": [ + "Name", + "GroupId" + ] + }, "AudioLanguageSelection": { "type": "structure", "members": { @@ -3255,6 +3275,10 @@ "AudioSelectorSettings": { "type": "structure", "members": { + "AudioHlsRenditionSelection": { + "shape": "AudioHlsRenditionSelection", + "locationName": "audioHlsRenditionSelection" + }, "AudioLanguageSelection": { "shape": "AudioLanguageSelection", "locationName": "audioLanguageSelection" @@ -15161,9 +15185,22 @@ "WebvttDestinationSettings": { "type": "structure", "members": { + "StyleControl": { + "shape": "WebvttDestinationStyleControl", + "locationName": "styleControl", + "documentation": "Controls whether the color and position of the source captions is passed through to the WebVTT output captions. PASSTHROUGH - Valid only if the source captions are EMBEDDED or TELETEXT. NO_STYLE_DATA - Don't pass through the style. The output captions will not contain any font styling information." + } }, "documentation": "Webvtt Destination Settings" }, + "WebvttDestinationStyleControl": { + "type": "string", + "documentation": "Webvtt Destination Style Control", + "enum": [ + "NO_STYLE_DATA", + "PASSTHROUGH" + ] + }, "__boolean": { "type": "boolean", "documentation": "Placeholder documentation for __boolean" diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 63618560c7c4..16d4ae4f1179 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index 1f659194c002..ff5b16b23f2f 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediastore/pom.xml 
b/services/mediastore/pom.xml index ac259b6f3981..62a63ac9ca20 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index 8e50a48591dd..e356cd83f31d 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index 1f791e9d196f..b82011bdf25f 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index 8b5021a4bb64..02ddfd3415f3 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index 11c23e6ae388..091fd82eeb5b 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index cbe2efe88c3f..7f1488a41977 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index 7bc9ec85b672..9151654edafc 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ 
services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 mobile diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 3c8ec72534df..4307ea08ef7b 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 mq diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index d95a4b4502a2..56d70b2d9d19 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index 82c8023bef49..b35b71a71f24 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 6efbda66a86e..c9782a3559a8 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index 5842fd7c667f..1f3052ac28c8 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index 51fd22460536..b5af11d47c25 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/nimble/pom.xml b/services/nimble/pom.xml index 
7ed0fa7600d8..0ba436524e87 100644 --- a/services/nimble/pom.xml +++ b/services/nimble/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT nimble AWS Java SDK :: Services :: Nimble diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 5aa5f3c6c8ed..0ad1e75e3f50 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index 477c67f56aec..204c955b3c67 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 1ecbb6d59f27..177645bf406b 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index 3b20d20dda52..7fa960ba685e 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index dbca08ccf4c4..d0e726e43ac3 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalize/src/main/resources/codegen-resources/service-2.json 
b/services/personalize/src/main/resources/codegen-resources/service-2.json index 69802bd99937..bdfd34eff7ed 100644 --- a/services/personalize/src/main/resources/codegen-resources/service-2.json +++ b/services/personalize/src/main/resources/codegen-resources/service-2.json @@ -81,7 +81,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Creates a job that exports data from your dataset to an Amazon S3 bucket. To allow Amazon Personalize to export the training data, you must specify an service-linked AWS Identity and Access Management (IAM) role that gives Amazon Personalize PutObject permissions for your Amazon S3 bucket. For information, see Exporting a dataset in the Amazon Personalize developer guide.

Status

A dataset export job can be in one of the following states:

  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

To get the status of the export job, call DescribeDatasetExportJob, and specify the Amazon Resource Name (ARN) of the dataset export job. The dataset export is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

", + "documentation":"

Creates a job that exports data from your dataset to an Amazon S3 bucket. To allow Amazon Personalize to export the training data, you must specify a service-linked IAM role that gives Amazon Personalize PutObject permissions for your Amazon S3 bucket. For information, see Exporting a dataset in the Amazon Personalize developer guide.

Status

A dataset export job can be in one of the following states:

  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

To get the status of the export job, call DescribeDatasetExportJob, and specify the Amazon Resource Name (ARN) of the dataset export job. The dataset export is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

", "idempotent":true }, "CreateDatasetGroup":{ @@ -97,7 +97,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates an empty dataset group. A dataset group contains related datasets that supply data for training a model. A dataset group can contain at most three datasets, one for each type of dataset:

  • Interactions

  • Items

  • Users

To train a model (create a solution), a dataset group that contains an Interactions dataset is required. Call CreateDataset to add a dataset to the group.

A dataset group can be in one of the following states:

  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

  • DELETE PENDING

To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the creation failed.

You must wait until the status of the dataset group is ACTIVE before adding a dataset to the group.

You can specify an AWS Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an AWS Identity and Access Management (IAM) role that has permission to access the key.

APIs that require a dataset group ARN in the request

Related APIs

" + "documentation":"

Creates an empty dataset group. A dataset group contains related datasets that supply data for training a model. A dataset group can contain at most three datasets, one for each type of dataset:

  • Interactions

  • Items

  • Users

To train a model (create a solution), a dataset group that contains an Interactions dataset is required. Call CreateDataset to add a dataset to the group.

A dataset group can be in one of the following states:

  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

  • DELETE PENDING

To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the creation failed.

You must wait until the status of the dataset group is ACTIVE before adding a dataset to the group.

You can specify a Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an Identity and Access Management (IAM) role that has permission to access the key.

APIs that require a dataset group ARN in the request

Related APIs

" }, "CreateDatasetImportJob":{ "name":"CreateDatasetImportJob", @@ -114,7 +114,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an AWS Identity and Access Management (IAM) service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it in an internal AWS system. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 Resources.

The dataset import job replaces any existing data in the dataset that you imported in bulk.

Status

A dataset import job can be in one of the following states:

  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset.

Related APIs

" + "documentation":"

Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an IAM service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it internally. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 Resources.

The dataset import job replaces any existing data in the dataset that you imported in bulk.

Status

A dataset import job can be in one of the following states:

  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset.

Related APIs

" }, "CreateEventTracker":{ "name":"CreateEventTracker", @@ -1207,8 +1207,7 @@ "type":"structure", "required":[ "name", - "solutionVersionArn", - "minProvisionedTPS" + "solutionVersionArn" ], "members":{ "name":{ @@ -1261,7 +1260,7 @@ }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management service role that has permissions to add data to your output Amazon S3 bucket.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that has permissions to add data to your output Amazon S3 bucket.

" }, "jobOutput":{ "shape":"DatasetExportJobOutput", @@ -1288,11 +1287,11 @@ }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the IAM role that has permissions to access the KMS key. Supplying an IAM role is only valid when also specifying a KMS key.

" + "documentation":"

The ARN of the Identity and Access Management (IAM) role that has permissions to access the Key Management Service (KMS) key. Supplying an IAM role is only valid when also specifying a KMS key.

" }, "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

The Amazon Resource Name (ARN) of a KMS key used to encrypt the datasets.

" + "documentation":"

The Amazon Resource Name (ARN) of a Key Management Service (KMS) key used to encrypt the datasets.

" } } }, @@ -1602,7 +1601,7 @@ }, "roleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management service role that has permissions to add data to your output Amazon S3 bucket.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that has permissions to add data to your output Amazon S3 bucket.

" }, "status":{ "shape":"Status", @@ -1691,7 +1690,7 @@ }, "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets.

" + "documentation":"

The Amazon Resource Name (ARN) of the Key Management Service (KMS) key used to encrypt the datasets.

" }, "creationDateTime":{ "shape":"Date", @@ -1706,7 +1705,7 @@ "documentation":"

If creating a dataset group fails, provides the reason why.

" } }, - "documentation":"

A dataset group is a collection of related datasets (Interactions, User, and Item). You create a dataset group by calling CreateDatasetGroup. You then create a dataset and add it to a dataset group by calling CreateDataset. The dataset group is used to create and train a solution by calling CreateSolution. A dataset group can contain only one of each type of dataset.

You can specify an AWS Key Management Service (KMS) key to encrypt the datasets in the group.

" + "documentation":"

A dataset group is a collection of related datasets (Interactions, User, and Item). You create a dataset group by calling CreateDatasetGroup. You then create a dataset and add it to a dataset group by calling CreateDataset. The dataset group is used to create and train a solution by calling CreateSolution. A dataset group can contain only one of each type of dataset.

You can specify a Key Management Service (KMS) key to encrypt the datasets in the group.

" }, "DatasetGroupSummary":{ "type":"structure", @@ -1764,7 +1763,7 @@ }, "roleArn":{ "shape":"Arn", - "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that has permissions to read from the Amazon S3 data source.

" + "documentation":"

The ARN of the IAM role that has permissions to read from the Amazon S3 data source.

" }, "status":{ "shape":"Status", @@ -2358,7 +2357,7 @@ }, "accountId":{ "shape":"AccountId", - "documentation":"

The Amazon AWS account that owns the event tracker.

" + "documentation":"

The Amazon Web Services account that owns the event tracker.

" }, "trackingId":{ "shape":"TrackingId", @@ -3267,7 +3266,7 @@ }, "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.

" + "documentation":"

The Amazon Resource Name (ARN) of the Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.

" } }, "documentation":"

The configuration details of an Amazon S3 input or output bucket.

" diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index 5cf95aa10345..b53dcd7c9b4c 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 4fad52c2a13f..5593e74a660d 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index be34fe0fe3e8..181e0b91064f 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index dcbef291c33e..69836172ec82 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 8e11f2c68ea9..df7aa151b62e 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index 6304ad907fef..f96332f63c2e 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS 
Voice diff --git a/services/polly/pom.xml b/services/polly/pom.xml index 9e37c85ea5f3..5654f560ab7c 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/pom.xml b/services/pom.xml index 2756106e3566..b4e8066168f8 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT services AWS Java SDK :: Services @@ -290,6 +290,11 @@ applicationcostprofiler apprunner proton + route53recoveryreadiness + route53recoverycontrolconfig + route53recoverycluster + chimesdkmessaging + chimesdkidentity The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index 9551b18297dd..e04c27b9e0ad 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 pricing diff --git a/services/pricing/src/main/resources/codegen-resources/service-2.json b/services/pricing/src/main/resources/codegen-resources/service-2.json index 42a82e015f4d..a8cd59842b55 100644 --- a/services/pricing/src/main/resources/codegen-resources/service-2.json +++ b/services/pricing/src/main/resources/codegen-resources/service-2.json @@ -46,7 +46,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"ExpiredNextTokenException"} ], - "documentation":"

Returns a list of attribute values. Attibutes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the AWS Billing and Cost Management User Guide.

" + "documentation":"

Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Amazon Web Services Billing and Cost Management User Guide.

" }, "GetProducts":{ "name":"GetProducts", @@ -295,7 +295,7 @@ "members":{ "ServiceCode":{ "shape":"String", - "documentation":"

The code for the AWS service.

" + "documentation":"

The code for the Amazon Web Services service.

" }, "AttributeNames":{ "shape":"AttributeNameList", @@ -311,5 +311,5 @@ "String":{"type":"string"}, "errorMessage":{"type":"string"} }, - "documentation":"

AWS Price List Service API (AWS Price List Service) is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The AWS Price List Service uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the AWS Price List Service to build cost control and scenario planning tools, reconcile billing data, forecast future spend for budgeting purposes, and provide cost benefit analysis that compare your internal workloads with AWS.

Use GetServices without a service code to retrieve the service codes for all AWS services, then GetServices with a service code to retreive the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

Service Endpoint

AWS Price List Service API provides the following two endpoints:

  • https://api.pricing.us-east-1.amazonaws.com

  • https://api.pricing.ap-south-1.amazonaws.com

" + "documentation":"

Amazon Web Services Price List Service API (Amazon Web Services Price List Service) is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List Service uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List Service to build cost control and scenario planning tools, reconcile billing data, forecast future spend for budgeting purposes, and provide cost benefit analysis that compare your internal workloads with Amazon Web Services.

Use GetServices without a service code to retrieve the service codes for all AWS services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

Service Endpoint

Amazon Web Services Price List Service API provides the following two endpoints:

  • https://api.pricing.us-east-1.amazonaws.com

  • https://api.pricing.ap-south-1.amazonaws.com

" } diff --git a/services/proton/pom.xml b/services/proton/pom.xml index 4223e2f88dec..2a6b036a4c7e 100644 --- a/services/proton/pom.xml +++ b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/proton/src/main/resources/codegen-resources/service-2.json b/services/proton/src/main/resources/codegen-resources/service-2.json index 10bc9a4cf099..5c6dfbc7d3a8 100644 --- a/services/proton/src/main/resources/codegen-resources/service-2.json +++ b/services/proton/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, AWS Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

For more information, see Environment account connections in the AWS Proton Administration guide.

", + "documentation":"

In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, AWS Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", "idempotent":true }, "CancelEnvironmentDeployment":{ @@ -48,7 +48,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS. For more information, see Update an environment in the AWS Proton Administration guide.

The following list includes potential cancellation scenarios.

  • If the cancellation attempt succeeds, the resulting deployment state is CANCELLED.

  • If the cancellation attempt fails, the resulting deployment state is FAILED.

  • If the current UpdateEnvironment action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED and the cancellation attempt has no effect.

" + "documentation":"

Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS. For more information, see Update an environment in the AWS Proton Administrator guide.

The following list includes potential cancellation scenarios.

  • If the cancellation attempt succeeds, the resulting deployment state is CANCELLED.

  • If the cancellation attempt fails, the resulting deployment state is FAILED.

  • If the current UpdateEnvironment action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED and the cancellation attempt has no effect.

" }, "CancelServiceInstanceDeployment":{ "name":"CancelServiceInstanceDeployment", @@ -66,7 +66,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS. For more information, see Update a service instance in the AWS Proton Administration guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

  • If the cancellation attempt succeeds, the resulting deployment state is CANCELLED.

  • If the cancellation attempt fails, the resulting deployment state is FAILED.

  • If the current UpdateServiceInstance action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED and the cancellation attempt has no effect.

" + "documentation":"

Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS. For more information, see Update a service instance in the AWS Proton Administrator guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

  • If the cancellation attempt succeeds, the resulting deployment state is CANCELLED.

  • If the cancellation attempt fails, the resulting deployment state is FAILED.

  • If the current UpdateServiceInstance action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED and the cancellation attempt has no effect.

" }, "CancelServicePipelineDeployment":{ "name":"CancelServicePipelineDeployment", @@ -84,7 +84,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS. For more information, see Update a service pipeline in the AWS Proton Administration guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

  • If the cancellation attempt succeeds, the resulting deployment state is CANCELLED.

  • If the cancellation attempt fails, the resulting deployment state is FAILED.

  • If the current UpdateServicePipeline action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED and the cancellation attempt has no effect.

" + "documentation":"

Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS. For more information, see Update a service pipeline in the AWS Proton Administrator guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

  • If the cancellation attempt succeeds, the resulting deployment state is CANCELLED.

  • If the cancellation attempt fails, the resulting deployment state is FAILED.

  • If the current UpdateServicePipeline action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED and the cancellation attempt has no effect.

" }, "CreateEnvironment":{ "name":"CreateEnvironment", @@ -103,7 +103,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deploy a new environment. An AWS Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. For more information, see the Environments in the AWS Proton Administration Guide.

", + "documentation":"

Deploy a new environment. An AWS Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. For more information, see the Environments in the AWS Proton Administrator Guide.

", "idempotent":true }, "CreateEnvironmentAccountConnection":{ @@ -122,7 +122,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from the management account.

An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the AWS Proton Administration guide.

", + "documentation":"

Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account.

An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the AWS Proton Administrator guide.

", "idempotent":true }, "CreateEnvironmentTemplate":{ @@ -141,7 +141,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an environment template for AWS Proton. For more information, see Environment Templates in the AWS Proton Administration Guide.

You can create an environment template in one of the two following ways:

  • Register and publish a standard environment template that instructs AWS Proton to deploy and manage environment infrastructure.

  • Register and publish a customer managed environment template that connects AWS Proton to your existing provisioned infrastructure that you manage. AWS Proton doesn't manage your existing provisioned infrastructure. To create an environment template for customer provisioned and managed infrastructure, include the provisioning parameter and set the value to CUSTOMER_MANAGED. For more information, see Register and publish an environment template in the AWS Proton Administration Guide.

", + "documentation":"

Create an environment template for AWS Proton. For more information, see Environment Templates in the AWS Proton Administrator Guide.

You can create an environment template in one of the two following ways:

  • Register and publish a standard environment template that instructs AWS Proton to deploy and manage environment infrastructure.

  • Register and publish a customer managed environment template that connects AWS Proton to your existing provisioned infrastructure that you manage. AWS Proton doesn't manage your existing provisioned infrastructure. To create an environment template for customer provisioned and managed infrastructure, include the provisioning parameter and set the value to CUSTOMER_MANAGED. For more information, see Register and publish an environment template in the AWS Proton Administrator Guide.

", "idempotent":true }, "CreateEnvironmentTemplateVersion":{ @@ -181,7 +181,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an AWS Proton service. An AWS Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the AWS Proton Administration Guide and Services in the AWS Proton User Guide.

", + "documentation":"

Create an AWS Proton service. An AWS Proton service is an instantiation of a service template and often includes several service instances and a pipeline. For more information, see Services in the AWS Proton Administrator Guide and Services in the AWS Proton User Guide.

", "idempotent":true }, "CreateServiceTemplate":{ @@ -200,7 +200,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CICD service pipeline. Developers, in turn, select the service template from AWS Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. AWS Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the AWS Proton Administration Guide.

", + "documentation":"

Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CICD service pipeline. Developers, in turn, select the service template from AWS Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. AWS Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the AWS Proton Administrator Guide.

", "idempotent":true }, "CreateServiceTemplateVersion":{ @@ -258,7 +258,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, delete an environment account connection.

After you delete an environment account connection that’s in use by an AWS Proton environment, AWS Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.

For more information, see Environment account connections in the AWS Proton Administration guide.

", + "documentation":"

In an environment account, delete an environment account connection.

After you delete an environment account connection that’s in use by an AWS Proton environment, AWS Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", "idempotent":true }, "DeleteEnvironmentTemplate":{ @@ -405,7 +405,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, view the detail data for an environment account connection.

For more information, see Environment account connections in the AWS Proton Administration guide.

" + "documentation":"

In an environment account, view the detail data for an environment account connection.

For more information, see Environment account connections in the AWS Proton Administrator guide.

" }, "GetEnvironmentTemplate":{ "name":"GetEnvironmentTemplate", @@ -523,7 +523,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

View a list of environment account connections.

For more information, see Environment account connections in the AWS Proton Administration guide.

" + "documentation":"

View a list of environment account connections.

For more information, see Environment account connections in the AWS Proton Administrator guide.

" }, "ListEnvironmentTemplateVersions":{ "name":"ListEnvironmentTemplateVersions", @@ -656,7 +656,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

List tags for a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

" + "documentation":"

List tags for a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" }, "RejectEnvironmentAccountConnection":{ "name":"RejectEnvironmentAccountConnection", @@ -674,7 +674,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In a management account, reject an environment account connection from another environment account.

After you reject an environment account connection request, you won’t be able to accept or use the rejected environment account connection.

You can’t reject an environment account connection that is connected to an environment.

For more information, see Environment account connections in the AWS Proton Administration guide.

", + "documentation":"

In a management account, reject an environment account connection from another environment account.

After you reject an environment account connection request, you won’t be able to accept or use the rejected environment account connection.

You can’t reject an environment account connection that is connected to an environment.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", "idempotent":true }, "TagResource":{ @@ -693,7 +693,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Tag a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", + "documentation":"

Tag a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", "idempotent":true }, "UntagResource":{ @@ -712,7 +712,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Remove a tag from a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", + "documentation":"

Remove a tag from a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", "idempotent":true }, "UpdateAccountSettings":{ @@ -748,7 +748,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update an environment.

If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn parameter to update or connect to an environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and associated with the current environment.

If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId parameter to update or connect to an environment account connection.

You can update either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. You can’t update both.

There are four modes for updating an environment as described in the following. The deploymentType field defines the mode.

NONE

In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.

CURRENT_VERSION

In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type.

MINOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.

MAJOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version (optional).

" + "documentation":"

Update an environment.

If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn parameter to update or connect to an environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and is associated with the current environment.

If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId parameter to update or connect to an environment account connection.

You can update either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. You can’t update both.

There are four modes for updating an environment as described in the following. The deploymentType field defines the mode.

NONE

In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.

CURRENT_VERSION

In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type.

MINOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.

MAJOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version (optional).

" }, "UpdateEnvironmentAccountConnection":{ "name":"UpdateEnvironmentAccountConnection", @@ -766,7 +766,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, update an environment account connection to use a new IAM role.

For more information, see Environment account connections in the AWS Proton Administration guide.

", + "documentation":"

In an environment account, update an environment account connection to use a new IAM role.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", "idempotent":true }, "UpdateEnvironmentTemplate":{ @@ -1126,7 +1126,7 @@ }, "environmentAccountConnectionId":{ "shape":"EnvironmentAccountConnectionId", - "documentation":"

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. For more information, see Environment account connections in the AWS Proton Administration guide.

" + "documentation":"

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. For more information, see Environment account connections in the AWS Proton Administrator guide.

" }, "name":{ "shape":"ResourceName", @@ -1138,11 +1138,11 @@ }, "spec":{ "shape":"SpecContents", - "documentation":"

A link to a YAML formatted spec file that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the AWS Proton Administration Guide.

" + "documentation":"

A link to a YAML formatted spec file that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the AWS Proton Administrator Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

Create tags for your environment. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

" + "documentation":"

Create tags for your environment. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" }, "templateMajorVersion":{ "shape":"TemplateVersionPart", @@ -1154,7 +1154,7 @@ }, "templateName":{ "shape":"ResourceName", - "documentation":"

The name of the environment template. For more information, see Environment Templates in the AWS Proton Administration Guide.

" + "documentation":"

The name of the environment template. For more information, see Environment Templates in the AWS Proton Administrator Guide.

" } } }, @@ -1194,7 +1194,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

Create tags for your environment template. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

" + "documentation":"

Create tags for your environment template. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" } } }, @@ -1226,7 +1226,7 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

To create a new minor version of the environment template, include a majorVersion.

To create a new major and minor version of the environment template, exclude majorVersion.

" + "documentation":"

To create a new minor version of the environment template, include a majorVersion.

To create a new major and minor version of the environment template, exclude majorVersion.

" }, "source":{ "shape":"TemplateVersionSourceInput", @@ -1275,7 +1275,7 @@ }, "repositoryConnectionArn":{ "shape":"Arn", - "documentation":"

The ARN of the repository connection. For more information, see Set up repository connection in the AWS Proton Administration Guide and Getting started in the AWS Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

" + "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up repository connection in the AWS Proton Administrator Guide and Setting up with AWS Proton in the AWS Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

" }, "repositoryId":{ "shape":"RepositoryId", @@ -1283,11 +1283,11 @@ }, "spec":{ "shape":"SpecContents", - "documentation":"

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the AWS Proton Administration Guide and Create a service in the AWS Proton User Guide.

" + "documentation":"

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the AWS Proton Administrator Guide and Create a service in the AWS Proton User Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

Create tags for your service. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

" + "documentation":"

Create tags for your service. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" }, "templateMajorVersion":{ "shape":"TemplateVersionPart", @@ -1335,11 +1335,11 @@ }, "pipelineProvisioning":{ "shape":"Provisioning", - "documentation":"

AWS Proton includes a service pipeline for your service by default. When included, this parameter indicates that an AWS Proton service pipeline won't be included for your service. Once specified, this parameter can't be changed. For more information, see Service template bundles in the AWS Proton Administration Guide.

" + "documentation":"

AWS Proton includes a service pipeline for your service by default. When included, this parameter indicates that an AWS Proton service pipeline won't be included for your service. Once specified, this parameter can't be changed. For more information, see Service template bundles in the AWS Proton Administrator Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

Create tags for your service template. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

" + "documentation":"

Create tags for your service template. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" } } }, @@ -1649,7 +1649,7 @@ }, "protonServiceRoleArn":{ "shape":"Arn", - "documentation":"

The ARN of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

" }, "provisioning":{ "shape":"Provisioning", @@ -1669,7 +1669,7 @@ }, "templateName":{ "shape":"ResourceName", - "documentation":"

The ARN of the environment template.

" + "documentation":"

The Amazon Resource Name (ARN) of the environment template.

" } }, "documentation":"

The environment detail data. An AWS Proton environment is a set of resources shared across an AWS Proton service.

" @@ -1865,7 +1865,7 @@ }, "protonServiceRoleArn":{ "shape":"Arn", - "documentation":"

The ARN of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

" }, "provisioning":{ "shape":"Provisioning", @@ -2627,7 +2627,7 @@ }, "resourceArn":{ "shape":"Arn", - "documentation":"

The ARN of the resource for the listed tags.

" + "documentation":"

The Amazon Resource Name (ARN) of the resource for the listed tags.

" } } }, @@ -2770,7 +2770,7 @@ }, "repositoryConnectionArn":{ "shape":"Arn", - "documentation":"

The ARN of the repository connection. For more information, see Set up a repository connection in the AWS Proton Administration Guide and Getting started in the AWS Proton User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up a repository connection in the AWS Proton Administrator Guide and Setting up with AWS Proton in the AWS Proton User Guide.

" }, "repositoryId":{ "shape":"RepositoryId", @@ -3001,7 +3001,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

A quota was exceeded. For more information, see AWS Proton Quotas in the AWS Proton Administration Guide.

", + "documentation":"

A quota was exceeded. For more information, see AWS Proton Quotas in the AWS Proton Administrator Guide.

", "exception":true }, "ServiceStatus":{ @@ -3495,7 +3495,7 @@ }, "environmentAccountConnectionId":{ "shape":"EnvironmentAccountConnectionId", - "documentation":"

The ID of the environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and associated with the current environment.

" + "documentation":"

The ID of the environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and is associated with the current environment.

" }, "name":{ "shape":"ResourceName", @@ -3503,7 +3503,7 @@ }, "protonServiceRoleArn":{ "shape":"Arn", - "documentation":"

The ARN of the AWS Proton service role that allows AWS Proton to make API calls to other services your behalf.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make API calls to other services on your behalf.

" }, "spec":{ "shape":"SpecContents", @@ -3611,7 +3611,7 @@ }, "spec":{ "shape":"SpecContents", - "documentation":"

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the AWS Proton Administration Guide or the AWS Proton User Guide.

" + "documentation":"

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the AWS Proton Administrator Guide or the AWS Proton User Guide.

" } } }, @@ -3791,5 +3791,5 @@ "exception":true } }, - "documentation":"

This is the AWS Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the AWS Proton service.

The documentation for each action shows the Query API request parameters and the XML response.

Alternatively, you can use the AWS CLI to access an API. For more information, see the AWS Command Line Interface User Guide.

The AWS Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that AWS Proton deploys and manages, they need permissions to use all of the listed API operations.

When developers select a specific infrastructure and tooling set, AWS Proton deploys their applications. To monitor their applications that are running on AWS Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.

To learn more about AWS Proton administration, see the AWS Proton Administration Guide.

To learn more about deploying serverless and containerized applications on AWS Proton, see the AWS Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.

The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding AWS CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.

Idempotent create APIs with a client token:

  • CreateEnvironmentTemplateVersion

  • CreateServiceTemplateVersion

  • CreateEnvironmentAccountConnection

 <p> <b>Idempotent delete APIs</b> </p> <p>Given a request action that has succeeded:</p> <p>When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.</p> <p>If you retry and the resource doesn't exist, the response is empty.</p> <p>In both cases, the retry succeeds.</p> <p>Idempotent delete APIs:</p> <ul> <li> <p>DeleteEnvironmentTemplate</p> </li> <li> <p>DeleteEnvironmentTemplateVersion</p> </li> <li> <p>DeleteServiceTemplate</p> </li> <li> <p>DeleteServiceTemplateVersion</p> </li> <li> <p>DeleteEnvironmentAccountConnection</p> </li> </ul> <p> <b>Asynchronous idempotent delete APIs</b> </p> <p>Given a request action that has succeeded:</p> <p>If you retry the request with an API from this group, if the original request delete operation status is <code>DELETE_IN_PROGRESS</code>, the retry returns the resource detail data in the response without performing any further actions.</p> <p>If the original request delete operation is complete, a retry returns an empty response.</p> <p>Asynchronous idempotent delete APIs:</p> <ul> <li> <p>DeleteEnvironment</p> </li> <li> <p>DeleteService</p> </li> </ul> 
" + "documentation":"

This is the AWS Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the AWS Proton service.

The documentation for each action shows the Query API request parameters and the XML response.

Alternatively, you can use the AWS CLI to access an API. For more information, see the AWS Command Line Interface User Guide.

The AWS Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that AWS Proton deploys and manages, they need permissions to use all of the listed API operations.

When developers select a specific infrastructure and tooling set, AWS Proton deploys their applications. To monitor their applications that are running on AWS Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.

To learn more about AWS Proton administration, see the AWS Proton Administrator Guide.

To learn more about deploying serverless and containerized applications on AWS Proton, see the AWS Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.

The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding AWS CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.

Idempotent create APIs with a client token:

  • CreateEnvironmentTemplateVersion

  • CreateServiceTemplateVersion

  • CreateEnvironmentAccountConnection

Idempotent create APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, and the original resource hasn't been modified, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If the original resource has been modified, the retry throws a ConflictException.

If you retry with different input parameters, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Idempotent create APIs:

  • CreateEnvironmentTemplate

  • CreateServiceTemplate

  • CreateEnvironment

  • CreateService

Idempotent delete APIs

Given a request action that has succeeded:

When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.

If you retry and the resource doesn't exist, the response is empty.

In both cases, the retry succeeds.

Idempotent delete APIs:

  • DeleteEnvironmentTemplate

  • DeleteEnvironmentTemplateVersion

  • DeleteServiceTemplate

  • DeleteServiceTemplateVersion

  • DeleteEnvironmentAccountConnection

Asynchronous idempotent delete APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, if the original request delete operation status is DELETE_IN_PROGRESS, the retry returns the resource detail data in the response without performing any further actions.

If the original request delete operation is complete, a retry returns an empty response.

Asynchronous idempotent delete APIs:

  • DeleteEnvironment

  • DeleteService

" } diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index 81ebc79781f2..b1313d052498 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldb/src/main/resources/codegen-resources/service-2.json b/services/qldb/src/main/resources/codegen-resources/service-2.json index 2e9249601f55..995a2751977c 100644 --- a/services/qldb/src/main/resources/codegen-resources/service-2.json +++ b/services/qldb/src/main/resources/codegen-resources/service-2.json @@ -42,7 +42,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Creates a new ledger in your AWS account in the current Region.

" + "documentation":"

Creates a new ledger in your account in the current Region.

" }, "DeleteLedger":{ "name":"DeleteLedger", @@ -99,7 +99,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns information about a ledger, including its state and when it was created.

" + "documentation":"

Returns information about a ledger, including its state, permissions mode, encryption at rest settings, and when it was created.

" }, "ExportJournalToS3":{ "name":"ExportJournalToS3", @@ -183,7 +183,7 @@ }, "input":{"shape":"ListJournalS3ExportsRequest"}, "output":{"shape":"ListJournalS3ExportsResponse"}, - "documentation":"

Returns an array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3Exports multiple times.

This action does not return any expired export jobs. For more information, see Export job expiration in the Amazon QLDB Developer Guide.

" + "documentation":"

Returns an array of journal export job descriptions for all ledgers that are associated with the current account and Region.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3Exports multiple times.

This action does not return any expired export jobs. For more information, see Export job expiration in the Amazon QLDB Developer Guide.

" }, "ListJournalS3ExportsForLedger":{ "name":"ListJournalS3ExportsForLedger", @@ -203,7 +203,7 @@ }, "input":{"shape":"ListLedgersRequest"}, "output":{"shape":"ListLedgersResponse"}, - "documentation":"

Returns an array of ledger summaries that are associated with the current AWS account and Region.

This action returns a maximum of 100 items and is paginated so that you can retrieve all the items by calling ListLedgers multiple times.

" + "documentation":"

Returns an array of ledger summaries that are associated with the current account and Region.

This action returns a maximum of 100 items and is paginated so that you can retrieve all the items by calling ListLedgers multiple times.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -337,7 +337,7 @@ "members":{ "Name":{ "shape":"LedgerName", - "documentation":"

The name of the ledger that you want to create. The name must be unique among all of your ledgers in the current AWS Region.

Naming constraints for ledger names are defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

" + "documentation":"

The name of the ledger that you want to create. The name must be unique among all of the ledgers in your account in the current Region.

Naming constraints for ledger names are defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

" }, "Tags":{ "shape":"Tags", @@ -350,6 +350,10 @@ "DeletionProtection":{ "shape":"DeletionProtection", "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger. You can disable it by calling the UpdateLedger operation to set the flag to false.

" + }, + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

The key in Key Management Service (KMS) to use for encryption of data at rest in the ledger. For more information, see Encryption at rest in the Amazon QLDB Developer Guide.

Use one of the following options to specify this parameter:

  • AWS_OWNED_KMS_KEY: Use a KMS key that is owned and managed by Amazon Web Services on your behalf.

  • Undefined: By default, use an Amazon Web Services owned KMS key.

  • A valid symmetric customer managed KMS key: Use the specified KMS key in your account that you create, own, and manage.

    Amazon QLDB does not support asymmetric keys. For more information, see Using symmetric and asymmetric keys in the Key Management Service Developer Guide.

To specify a customer managed KMS key, you can use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a key in a different account, you must use the key ARN or alias ARN.

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

For more information, see Key identifiers (KeyId) in the Key Management Service Developer Guide.

" } } }, @@ -379,6 +383,10 @@ "DeletionProtection":{ "shape":"DeletionProtection", "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger. You can disable it by calling the UpdateLedger operation to set the flag to false.

" + }, + "KmsKeyArn":{ + "shape":"Arn", + "documentation":"

The ARN of the customer managed KMS key that the ledger uses for encryption at rest. If this parameter is undefined, the ledger uses an Amazon Web Services owned KMS key for encryption.

" } } }, @@ -494,6 +502,10 @@ "DeletionProtection":{ "shape":"DeletionProtection", "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger. You can disable it by calling the UpdateLedger operation to set the flag to false.

" + }, + "EncryptionDescription":{ + "shape":"LedgerEncryptionDescription", + "documentation":"

Information about the encryption of data at rest in the ledger. This includes the current status, the KMS key, and when the key became inaccessible (in the case of an error).

" } } }, @@ -502,6 +514,14 @@ "max":32, "min":32 }, + "EncryptionStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "UPDATING", + "KMS_KEY_INACCESSIBLE" + ] + }, "ErrorCause":{ "type":"string", "enum":[ @@ -540,7 +560,7 @@ }, "RoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

  • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

  • (Optional) Use your customer master key (CMK) in AWS Key Management Service (AWS KMS) for server-side encryption of your exported data.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

  • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

  • (Optional) Use your customer master key (CMK) in Key Management Service (KMS) for server-side encryption of your exported data.

" } } }, @@ -714,7 +734,7 @@ }, "ExclusiveEndTime":{ "shape":"Timestamp", - "documentation":"

The exclusive date and time that specifies when the stream ends. If this parameter is blank, the stream runs indefinitely until you cancel it.

" + "documentation":"

The exclusive date and time that specifies when the stream ends. If this parameter is undefined, the stream runs indefinitely until you cancel it.

" }, "RoleArn":{ "shape":"Arn", @@ -791,7 +811,7 @@ "S3ExportConfiguration":{"shape":"S3ExportConfiguration"}, "RoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

  • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

  • (Optional) Use your customer master key (CMK) in AWS Key Management Service (AWS KMS) for server-side encryption of your exported data.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

  • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

  • (Optional) Use your customer master key (CMK) in Key Management Service (KMS) for server-side encryption of your exported data.

" } }, "documentation":"

Information about a journal export job, including the ledger name, export ID, creation time, current status, and the parameters of the original export creation request.

" @@ -815,6 +835,32 @@ }, "documentation":"

The configuration settings of the Amazon Kinesis Data Streams destination for an Amazon QLDB journal stream.

" }, + "KmsKey":{ + "type":"string", + "max":1600 + }, + "LedgerEncryptionDescription":{ + "type":"structure", + "required":[ + "KmsKeyArn", + "EncryptionStatus" + ], + "members":{ + "KmsKeyArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the customer managed KMS key that the ledger uses for encryption at rest. If this parameter is undefined, the ledger uses an Amazon Web Services owned KMS key for encryption.

" + }, + "EncryptionStatus":{ + "shape":"EncryptionStatus", + "documentation":"

The current state of encryption at rest for the ledger. This can be one of the following values:

  • ENABLED: Encryption is fully enabled using the specified key.

  • UPDATING: The ledger is actively processing the specified key change.

    Key changes in QLDB are asynchronous. The ledger is fully accessible without any performance impact while the key change is being processed. The amount of time it takes to update a key varies depending on the ledger size.

  • KMS_KEY_INACCESSIBLE: The specified customer managed KMS key is not accessible, and the ledger is impaired. Either the key was disabled or deleted, or the grants on the key were revoked. When a ledger is impaired, it is not accessible and does not accept any read or write requests.

    An impaired ledger automatically returns to an active state after you restore the grants on the key, or re-enable the key that was disabled. However, deleting a customer managed KMS key is irreversible. After a key is deleted, you can no longer access the ledgers that are protected with that key, and the data becomes unrecoverable permanently.

" + }, + "InaccessibleKmsKeyDateTime":{ + "shape":"Timestamp", + "documentation":"

The date and time, in epoch time format, when the KMS key first became inaccessible, in the case of an error. (Epoch time format is the number of seconds that have elapsed since 12:00:00 AM January 1, 1970 UTC.)

This parameter is undefined if the KMS key is accessible.

" + } + }, + "documentation":"

Information about the encryption of data at rest in an Amazon QLDB ledger. This includes the current status, the key in Key Management Service (KMS), and when the key became inaccessible (in the case of an error).

For more information, see Encryption at rest in the Amazon QLDB Developer Guide.

" + }, "LedgerList":{ "type":"list", "member":{"shape":"LedgerSummary"} @@ -961,7 +1007,7 @@ "members":{ "JournalS3Exports":{ "shape":"JournalS3ExportList", - "documentation":"

The array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

" + "documentation":"

The array of journal export job descriptions for all ledgers that are associated with the current account and Region.

" }, "NextToken":{ "shape":"NextToken", @@ -991,7 +1037,7 @@ "members":{ "Ledgers":{ "shape":"LedgerList", - "documentation":"

The array of ledger summaries that are associated with the current AWS account and Region.

" + "documentation":"

The array of ledger summaries that are associated with the current account and Region.

" }, "NextToken":{ "shape":"NextToken", @@ -1125,7 +1171,7 @@ }, "KmsKeyArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for a symmetric customer master key (CMK) in AWS Key Management Service (AWS KMS). Amazon S3 does not support asymmetric CMKs.

You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType.

KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) in Key Management Service (KMS). Amazon S3 does not support asymmetric CMKs.

You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType.

KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType.

" } }, "documentation":"

The encryption settings that are used by a journal export job to write data in an Amazon Simple Storage Service (Amazon S3) bucket.

" @@ -1362,6 +1408,10 @@ "DeletionProtection":{ "shape":"DeletionProtection", "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger. You can disable it by calling the UpdateLedger operation to set the flag to false.

" + }, + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

The key in Key Management Service (KMS) to use for encryption of data at rest in the ledger. For more information, see Encryption at rest in the Amazon QLDB Developer Guide.

Use one of the following options to specify this parameter:

  • AWS_OWNED_KMS_KEY: Use a KMS key that is owned and managed by Amazon Web Services on your behalf.

  • Undefined: Make no changes to the KMS key of the ledger.

  • A valid symmetric customer managed KMS key: Use the specified KMS key in your account that you create, own, and manage.

    Amazon QLDB does not support asymmetric keys. For more information, see Using symmetric and asymmetric keys in the Key Management Service Developer Guide.

To specify a customer managed KMS key, you can use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a key in a different account, you must use the key ARN or alias ARN.

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

For more information, see Key identifiers (KeyId) in the Key Management Service Developer Guide.

" } } }, @@ -1387,6 +1437,10 @@ "DeletionProtection":{ "shape":"DeletionProtection", "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger. You can disable it by calling the UpdateLedger operation to set the flag to false.

" + }, + "EncryptionDescription":{ + "shape":"LedgerEncryptionDescription", + "documentation":"

Information about the encryption of data at rest in the ledger. This includes the current status, the KMS key, and when the key became inaccessible (in the case of an error).

" } } }, diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index 77f4fc6accfb..4a66b9b6ace3 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index 3e4b2c68fd68..117f2c5ad993 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index fe4cd856030c..0a923fb011b1 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -46,7 +46,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Creates Amazon QuickSight customizations the current AWS Region. Currently, you can add a custom default theme by using the CreateAccountCustomization or UpdateAccountCustomization API operation. To further customize QuickSight by removing QuickSight sample assets and videos for all new users, see Customizing QuickSight in the Amazon QuickSight User Guide.

You can create customizations for your AWS account or, if you specify a namespace, for a QuickSight namespace instead. Customizations that apply to a namespace always override customizations that apply to an AWS account. To find out which customizations apply, use the DescribeAccountCustomization API operation.

Before you use the CreateAccountCustomization API operation to add a theme as the namespace default, make sure that you first share the theme with the namespace. If you don't share it with the namespace, the theme isn't visible to your users even if you make it the default theme. To check if the theme is shared, view the current permissions by using the DescribeThemePermissions API operation. To share the theme, grant permissions by using the UpdateThemePermissions API operation.

" + "documentation":"

Creates Amazon QuickSight customizations in the current Region. Currently, you can add a custom default theme by using the CreateAccountCustomization or UpdateAccountCustomization API operation. To further customize QuickSight by removing QuickSight sample assets and videos for all new users, see Customizing QuickSight in the Amazon QuickSight User Guide.

You can create customizations for your Amazon Web Services account or, if you specify a namespace, for a QuickSight namespace instead. Customizations that apply to a namespace always override customizations that apply to an Amazon Web Services account. To find out which customizations apply, use the DescribeAccountCustomization API operation.

Before you use the CreateAccountCustomization API operation to add a theme as the namespace default, make sure that you first share the theme with the namespace. If you don't share it with the namespace, the theme isn't visible to your users even if you make it the default theme. To check if the theme is shared, view the current permissions by using the DescribeThemePermissions API operation. To share the theme, grant permissions by using the UpdateThemePermissions API operation.

" }, "CreateAnalysis":{ "name":"CreateAnalysis", @@ -84,7 +84,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Creates a dashboard from a template. To first create a template, see the CreateTemplate API operation.

A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. You can share QuickSight dashboards. With the right permissions, you can create scheduled email reports from them. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account.

" + "documentation":"

Creates a dashboard from a template. To first create a template, see the CreateTemplate API operation.

A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. You can share QuickSight dashboards. With the right permissions, you can create scheduled email reports from them. If you have the correct permissions, you can create a dashboard from a template that exists in a different Amazon Web Services account.

" }, "CreateDataSet":{ "name":"CreateDataSet", @@ -225,7 +225,7 @@ {"shape":"ConcurrentUpdatingException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Creates an assignment with one specified IAM policy, identified by its Amazon Resource Name (ARN). This policy assignment is attached to the specified groups or users of Amazon QuickSight. Assignment names are unique per AWS account. To avoid overwriting rules in other namespaces, use assignment names that are unique.

" + "documentation":"

Creates an assignment with one specified IAM policy, identified by its Amazon Resource Name (ARN). This policy assignment is attached to the specified groups or users of Amazon QuickSight. Assignment names are unique per Amazon Web Services account. To avoid overwriting rules in other namespaces, use assignment names that are unique.

" }, "CreateIngestion":{ "name":"CreateIngestion", @@ -244,7 +244,7 @@ {"shape":"ResourceExistsException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Creates and starts a new SPICE ingestion on a dataset

Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using tags? in the AWS Knowledge Center. Tags are visible on the tagged dataset, but not on the ingestion resource.

" + "documentation":"

Creates and starts a new SPICE ingestion on a dataset

Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using tags? in the Amazon Web Services Knowledge Center. Tags are visible on the tagged dataset, but not on the ingestion resource.

" }, "CreateNamespace":{ "name":"CreateNamespace", @@ -266,7 +266,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

(Enterprise edition only) Creates a new namespace for you to use with Amazon QuickSight.

A namespace allows you to isolate the QuickSight users and groups that are registered for that namespace. Users that access the namespace can share assets only with other users or groups in the same namespace. They can't see users and groups in other namespaces. You can create a namespace after your AWS account is subscribed to QuickSight. The namespace must be unique within the AWS account. By default, there is a limit of 100 namespaces per AWS account. To increase your limit, create a ticket with AWS Support.

" + "documentation":"

(Enterprise edition only) Creates a new namespace for you to use with Amazon QuickSight.

A namespace allows you to isolate the QuickSight users and groups that are registered for that namespace. Users that access the namespace can share assets only with other users or groups in the same namespace. They can't see users and groups in other namespaces. You can create a namespace after your Amazon Web Services account is subscribed to QuickSight. The namespace must be unique within the Amazon Web Services account. By default, there is a limit of 100 namespaces per Amazon Web Services account. To increase your limit, create a ticket with Amazon Web Services Support.

" }, "CreateTemplate":{ "name":"CreateTemplate", @@ -287,7 +287,7 @@ {"shape":"ConflictException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Creates a template from an existing QuickSight analysis or template. You can use the resulting template to create a dashboard.

A template is an entity in QuickSight that encapsulates the metadata required to create an analysis and that you can use to create s dashboard. A template adds a layer of abstraction by using placeholders to replace the dataset associated with the analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.

" + "documentation":"

Creates a template from an existing Amazon QuickSight analysis or template. You can use the resulting template to create a dashboard.

A template is an entity in QuickSight that encapsulates the metadata required to create an analysis and that you can use to create a dashboard. A template adds a layer of abstraction by using placeholders to replace the dataset associated with the analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.

" }, "CreateTemplateAlias":{ "name":"CreateTemplateAlias", @@ -365,7 +365,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Deletes all Amazon QuickSight customizations in this AWS Region for the specified AWS account and QuickSight namespace.

" + "documentation":"

Deletes all Amazon QuickSight customizations in this Region for the specified Amazon Web Services account and QuickSight namespace.

" }, "DeleteAnalysis":{ "name":"DeleteAnalysis", @@ -641,7 +641,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Deletes the Amazon QuickSight user that is associated with the identity of the AWS Identity and Access Management (IAM) user or role that's making the call. The IAM user isn't deleted as a result of this call.

" + "documentation":"

Deletes the Amazon QuickSight user that is associated with the identity of the Identity and Access Management (IAM) user or role that's making the call. The IAM user isn't deleted as a result of this call.

" }, "DeleteUserByPrincipalId":{ "name":"DeleteUserByPrincipalId", @@ -678,7 +678,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Describes the customizations associated with the provided AWS account and Amazon QuickSight namespace in an AWS Region. The QuickSight console evaluates which customizations to apply by running this API operation with the Resolved flag included.

To determine what customizations display when you run this command, it can help to visualize the relationship of the entities involved.

  • AWS Account - The AWS account exists at the top of the hierarchy. It has the potential to use all of the AWS Regions and AWS Services. When you subscribe to QuickSight, you choose one AWS Region to use as your home Region. That's where your free SPICE capacity is located. You can use QuickSight in any supported AWS Region.

  • AWS Region - In each AWS Region where you sign in to QuickSight at least once, QuickSight acts as a separate instance of the same service. If you have a user directory, it resides in us-east-1, which is the US East (N. Virginia). Generally speaking, these users have access to QuickSight in any AWS Region, unless they are constrained to a namespace.

    To run the command in a different AWS Region, you change your Region settings. If you're using the AWS CLI, you can use one of the following options:

  • Namespace - A QuickSight namespace is a partition that contains users and assets (data sources, datasets, dashboards, and so on). To access assets that are in a specific namespace, users and groups must also be part of the same namespace. People who share a namespace are completely isolated from users and assets in other namespaces, even if they are in the same AWS account and AWS Region.

  • Applied customizations - Within an AWS Region, a set of QuickSight customizations can apply to an AWS account or to a namespace. Settings that you apply to a namespace override settings that you apply to an AWS account. All settings are isolated to a single AWS Region. To apply them in other AWS Regions, run the CreateAccountCustomization command in each AWS Region where you want to apply the same customizations.

" + "documentation":"

Describes the customizations associated with the provided Amazon Web Services account and Amazon QuickSight namespace in a Region. The QuickSight console evaluates which customizations to apply by running this API operation with the Resolved flag included.

To determine what customizations display when you run this command, it can help to visualize the relationship of the entities involved.

  • Amazon Web Services account - The Amazon Web Services account exists at the top of the hierarchy. It has the potential to use all of the Regions and AWS Services. When you subscribe to QuickSight, you choose one Region to use as your home Region. That's where your free SPICE capacity is located. You can use QuickSight in any supported Region.

  • Region - In each Region where you sign in to QuickSight at least once, QuickSight acts as a separate instance of the same service. If you have a user directory, it resides in us-east-1, which is the US East (N. Virginia). Generally speaking, these users have access to QuickSight in any Region, unless they are constrained to a namespace.

    To run the command in a different Region, you change your Region settings. If you're using the AWS CLI, you can use one of the following options:

  • Namespace - A QuickSight namespace is a partition that contains users and assets (data sources, datasets, dashboards, and so on). To access assets that are in a specific namespace, users and groups must also be part of the same namespace. People who share a namespace are completely isolated from users and assets in other namespaces, even if they are in the same Amazon Web Services account and Amazon Web Services Region.

  • Applied customizations - Within an Amazon Web Services Region, a set of QuickSight customizations can apply to an Amazon Web Services account or to a namespace. Settings that you apply to a namespace override settings that you apply to an Amazon Web Services account. All settings are isolated to a single Amazon Web Services Region. To apply them in other Amazon Web Services Regions, run the CreateAccountCustomization command in each Amazon Web Services Region where you want to apply the same customizations.

" }, "DescribeAccountSettings":{ "name":"DescribeAccountSettings", @@ -696,7 +696,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Describes the settings that were used when your QuickSight subscription was first created in this AWS account.

" + "documentation":"

Describes the settings that were used when your QuickSight subscription was first created in this Amazon Web Services account.

" }, "DescribeAnalysis":{ "name":"DescribeAnalysis", @@ -1091,6 +1091,47 @@ ], "documentation":"

Returns information about a user, given the user name.

" }, + "GenerateEmbedUrlForAnonymousUser":{ + "name":"GenerateEmbedUrlForAnonymousUser", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/embed-url/anonymous-user" + }, + "input":{"shape":"GenerateEmbedUrlForAnonymousUserRequest"}, + "output":{"shape":"GenerateEmbedUrlForAnonymousUserResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"SessionLifetimeInMinutesInvalidException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"UnsupportedPricingPlanException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Generates an embed URL that you can use to embed an Amazon QuickSight dashboard in your website, without having to register any reader users. Before you use this action, make sure that you have configured the dashboards and permissions.

The following rules apply to the generated URL:

  • It contains a temporary bearer token. It is valid for 5 minutes after it is generated. Once redeemed within this period, it cannot be re-used again.

  • The URL validity period should not be confused with the actual session lifetime that can be customized using the SessionLifetimeInMinutes parameter.

    The resulting user session is valid for 15 minutes (default) to 10 hours (maximum).

  • You are charged only when the URL is used or there is interaction with Amazon QuickSight.

For more information, see Embedded Analytics in the Amazon QuickSight User Guide.

For more information about the high-level steps for embedding and for an interactive demo of the ways you can customize embedding, visit the Amazon QuickSight Developer Portal.

" + }, + "GenerateEmbedUrlForRegisteredUser":{ + "name":"GenerateEmbedUrlForRegisteredUser", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/embed-url/registered-user" + }, + "input":{"shape":"GenerateEmbedUrlForRegisteredUserRequest"}, + "output":{"shape":"GenerateEmbedUrlForRegisteredUserResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"QuickSightUserNotFoundException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"SessionLifetimeInMinutesInvalidException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"UnsupportedPricingPlanException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Generates an embed URL that you can use to embed an Amazon QuickSight experience in your website. This action can be used for any type of user registered in an Amazon QuickSight account. Before you use this action, make sure that you have configured the relevant Amazon QuickSight resource and permissions.

The following rules apply to the generated URL:

  • It contains a temporary bearer token. It is valid for 5 minutes after it is generated. Once redeemed within this period, it cannot be re-used again.

  • The URL validity period should not be confused with the actual session lifetime that can be customized using the SessionLifetimeInMinutes parameter.

    The resulting user session is valid for 15 minutes (default) to 10 hours (maximum).

  • You are charged only when the URL is used or there is interaction with Amazon QuickSight.

For more information, see Embedded Analytics in the Amazon QuickSight User Guide.

For more information about the high-level steps for embedding and for an interactive demo of the ways you can customize embedding, visit the Amazon QuickSight Developer Portal.

" + }, "GetDashboardEmbedUrl":{ "name":"GetDashboardEmbedUrl", "http":{ @@ -1113,7 +1154,7 @@ {"shape":"UnsupportedPricingPlanException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Generates a session URL and authorization code that you can use to embed an Amazon QuickSight read-only dashboard in your web server code. Before you use this command, make sure that you have configured the dashboards and permissions.

Currently, you can use GetDashboardEmbedURL only from the server, not from the user's browser. The following rules apply to the combination of URL and authorization code:

  • They must be used together.

  • They can be used one time only.

  • They are valid for 5 minutes after you run this command.

  • The resulting user session is valid for 10 hours.

For more information, see Embedded Analytics in the Amazon QuickSight User Guide.

" + "documentation":"

Generates a session URL and authorization code that you can use to embed an Amazon QuickSight read-only dashboard in your web server code. Before you use this command, make sure that you have configured the dashboards and permissions.

Currently, you can use GetDashboardEmbedURL only from the server, not from the user's browser. The following rules apply to the combination of URL and authorization code:

  • They must be used together.

  • They can be used one time only.

  • They are valid for 5 minutes after you run this command.

  • The resulting user session is valid for 10 hours.

For more information, see Embedded Analytics in the Amazon QuickSight User Guide.

For more information about the high-level steps for embedding and for an interactive demo of the ways you can customize embedding, visit the Amazon QuickSight Developer Portal.

" }, "GetSessionEmbedUrl":{ "name":"GetSessionEmbedUrl", @@ -1150,7 +1191,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists Amazon QuickSight analyses that exist in the specified AWS account.

" + "documentation":"

Lists Amazon QuickSight analyses that exist in the specified Amazon Web Services account.

" }, "ListDashboardVersions":{ "name":"ListDashboardVersions", @@ -1184,7 +1225,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists dashboards in an AWS account.

" + "documentation":"

Lists dashboards in an Amazon Web Services account.

" }, "ListDataSets":{ "name":"ListDataSets", @@ -1201,7 +1242,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists all of the datasets belonging to the current AWS account in an AWS Region.

The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*.

" + "documentation":"

Lists all of the datasets belonging to the current Amazon Web Services account in an Amazon Web Services Region.

The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*.

" }, "ListDataSources":{ "name":"ListDataSources", @@ -1218,7 +1259,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists data sources in current AWS Region that belong to this AWS account.

" + "documentation":"

Lists data sources in the current Amazon Web Services Region that belong to this Amazon Web Services account.

" }, "ListFolderMembers":{ "name":"ListFolderMembers", @@ -1372,7 +1413,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Lists the namespaces for the specified AWS account.

" + "documentation":"

Lists the namespaces for the specified Amazon Web Services account.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1480,7 +1521,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists all the versions of the themes in the current AWS account.

" + "documentation":"

Lists all the versions of the themes in the current Amazon Web Services account.

" }, "ListThemes":{ "name":"ListThemes", @@ -1499,7 +1540,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists all the themes in the current AWS account.

" + "documentation":"

Lists all the themes in the current Amazon Web Services account.

" }, "ListUserGroups":{ "name":"ListUserGroups", @@ -1595,7 +1636,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Searches for analyses that belong to the user specified in the filter.

" + "documentation":"

Searches for analyses that belong to the user specified in the filter.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

" }, "SearchDashboards":{ "name":"SearchDashboards", @@ -1613,7 +1654,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Searches for dashboards that belong to a user.

" + "documentation":"

Searches for dashboards that belong to a user.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

" }, "SearchFolders":{ "name":"SearchFolders", @@ -1650,7 +1691,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Assigns one or more tags (key-value pairs) to the specified QuickSight resource.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource. QuickSight supports tagging on data set, data source, dashboard, and template.

Tagging for QuickSight works in a similar way to tagging for other AWS services, except for the following:

  • You can't use tags to track AWS costs for QuickSight. This restriction is because QuickSight costs are based on users and SPICE capacity, which aren't taggable resources.

  • QuickSight doesn't currently support the Tag Editor for AWS Resource Groups.

" + "documentation":"

Assigns one or more tags (key-value pairs) to the specified QuickSight resource.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource. QuickSight supports tagging on data set, data source, dashboard, and template.

Tagging for QuickSight works in a similar way to tagging for other AWS services, except for the following:

  • You can't use tags to track AWS costs for QuickSight. This restriction is because QuickSight costs are based on users and SPICE capacity, which aren't taggable resources.

  • QuickSight doesn't currently support the Tag Editor for Resource Groups.

" }, "UntagResource":{ "name":"UntagResource", @@ -1685,7 +1726,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Updates Amazon QuickSight customizations the current AWS Region. Currently, the only customization you can use is a theme.

You can use customizations for your AWS account or, if you specify a namespace, for a QuickSight namespace instead. Customizations that apply to a namespace override customizations that apply to an AWS account. To find out which customizations apply, use the DescribeAccountCustomization API operation.

" + "documentation":"

Updates Amazon QuickSight customizations for the current Amazon Web Services Region. Currently, the only customization you can use is a theme.

You can use customizations for your Amazon Web Services account or, if you specify a namespace, for a QuickSight namespace instead. Customizations that apply to a namespace override customizations that apply to an Amazon Web Services account. To find out which customizations apply, use the DescribeAccountCustomization API operation.

" }, "UpdateAccountSettings":{ "name":"UpdateAccountSettings", @@ -1703,7 +1744,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Updates the Amazon QuickSight settings in your AWS account.

" + "documentation":"

Updates the Amazon QuickSight settings in your Amazon Web Services account.

" }, "UpdateAnalysis":{ "name":"UpdateAnalysis", @@ -1759,7 +1800,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Updates a dashboard in an AWS account.

" + "documentation":"

Updates a dashboard in an Amazon Web Services account.

Updating a Dashboard creates a new dashboard version but does not immediately publish the new version. You can update the published version of a dashboard by using the UpdateDashboardPublishedVersion API operation.

" }, "UpdateDashboardPermissions":{ "name":"UpdateDashboardPermissions", @@ -2088,7 +2129,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

You don't have access to this item. The provided credentials couldn't be validated. You might not be authorized to carry out the request. Make sure that your account is authorized to use the Amazon QuickSight service, that your policies have the correct permissions, and that you are using the correct access keys.

", @@ -2103,14 +2144,14 @@ "documentation":"

The default theme for this QuickSight subscription.

" } }, - "documentation":"

The Amazon QuickSight customizations associated with your AWS account or a QuickSight namespace in a specific AWS Region.

" + "documentation":"

The Amazon QuickSight customizations associated with your Amazon Web Services account or a QuickSight namespace in a specific Amazon Web Services Region.

" }, "AccountSettings":{ "type":"structure", "members":{ "AccountName":{ "shape":"String", - "documentation":"

The \"account name\" you provided for the QuickSight subscription in your AWS account. You create this name when you sign up for QuickSight. It is unique in all of AWS and it appears only in the console when users sign in.

" + "documentation":"

The \"account name\" you provided for the QuickSight subscription in your Amazon Web Services account. You create this name when you sign up for QuickSight. It is unique in all of Amazon Web Services and it appears only when users sign in.

" }, "Edition":{ "shape":"Edition", @@ -2118,14 +2159,14 @@ }, "DefaultNamespace":{ "shape":"Namespace", - "documentation":"

The default QuickSight namespace for your AWS account.

" + "documentation":"

The default QuickSight namespace for your Amazon Web Services account.

" }, "NotificationEmail":{ "shape":"String", "documentation":"

The main notification email for your QuickSight subscription.

" } }, - "documentation":"

The QuickSight settings associated with your AWS account.

" + "documentation":"

The QuickSight settings associated with your Amazon Web Services account.

" }, "ActionList":{ "type":"list", @@ -2145,7 +2186,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the resource.

" } }, - "documentation":"

The active AWS Identity and Access Management (IAM) policy assignment.

" + "documentation":"

The active Identity and Access Management (IAM) policy assignment.

" }, "ActiveIAMPolicyAssignmentList":{ "type":"list", @@ -2338,7 +2379,7 @@ }, "Name":{ "shape":"AnalysisName", - "documentation":"

The name of the analysis. This name is displayed in the QuickSight console.

" + "documentation":"

The name of the analysis. This name is displayed in the QuickSight console.

" }, "Status":{ "shape":"ResourceStatus", @@ -2360,7 +2401,32 @@ "member":{"shape":"AnalysisSummary"}, "max":100 }, + "AnonymousUserDashboardEmbeddingConfiguration":{ + "type":"structure", + "required":["InitialDashboardId"], + "members":{ + "InitialDashboardId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The dashboard ID for the dashboard that you want the user to see first. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders this dashboard.

The Amazon Resource Name (ARN) of this dashboard must be included in the AuthorizedResourceArns parameter. Otherwise, the request will fail with InvalidParameterValueException.

" + } + }, + "documentation":"

Information about the dashboard that you want to embed.

" + }, + "AnonymousUserEmbeddingExperienceConfiguration":{ + "type":"structure", + "members":{ + "Dashboard":{ + "shape":"AnonymousUserDashboardEmbeddingConfiguration", + "documentation":"

The type of embedding experience. In this case, an Amazon QuickSight dashboard.

" + } + }, + "documentation":"

The type of experience you want to embed. For anonymous users, you can embed an Amazon QuickSight dashboard.

" + }, "Arn":{"type":"string"}, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, "AssignmentStatus":{ "type":"string", "enum":[ @@ -2444,7 +2510,7 @@ "documentation":"

Dataset name.

" } }, - "documentation":"

AWS IoT Analytics parameters.

" + "documentation":"

Amazon Web Services IoT Analytics parameters.

" }, "Boolean":{"type":"boolean"}, "BorderStyle":{ @@ -2497,7 +2563,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -2528,7 +2594,7 @@ }, "RequestId":{ "shape":"string", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -2751,7 +2817,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

Updating or deleting a resource can cause an inconsistent state.

", @@ -2771,7 +2837,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you want to customize QuickSight for.

", + "documentation":"

The ID for the Amazon Web Services account; that you want to customize QuickSight for.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -2783,7 +2849,7 @@ }, "AccountCustomization":{ "shape":"AccountCustomization", - "documentation":"

The QuickSight customizations you're adding in the current AWS Region. You can add these to an AWS account and a QuickSight namespace.

For example, you can add a default theme by setting AccountCustomization to the midnight theme: \"AccountCustomization\": { \"DefaultTheme\": \"arn:aws:quicksight::aws:theme/MIDNIGHT\" }. Or, you can add a custom theme by specifying \"AccountCustomization\": { \"DefaultTheme\": \"arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639\" }.

" + "documentation":"

The QuickSight customizations you're adding in the current Amazon Web Services Region. You can add these to an Amazon Web Services account and a QuickSight namespace.

For example, you can add a default theme by setting AccountCustomization to the midnight theme: \"AccountCustomization\": { \"DefaultTheme\": \"arn:aws:quicksight::aws:theme/MIDNIGHT\" }. Or, you can add a custom theme by specifying \"AccountCustomization\": { \"DefaultTheme\": \"arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639\" }.

" }, "Tags":{ "shape":"TagList", @@ -2796,11 +2862,11 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the customization that you created for this AWS account.

" + "documentation":"

The Amazon Resource Name (ARN) for the customization that you created for this Amazon Web Services account;.

" }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you want to customize QuickSight for.

" + "documentation":"

The ID for the Amazon Web Services account; that you want to customize QuickSight for.

" }, "Namespace":{ "shape":"Namespace", @@ -2808,11 +2874,11 @@ }, "AccountCustomization":{ "shape":"AccountCustomization", - "documentation":"

The QuickSight customizations you're adding in the current AWS Region.

" + "documentation":"

The QuickSight customizations you're adding in the current Region;.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -2832,7 +2898,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account where you are creating an analysis.

", + "documentation":"

The ID of the Amazon Web Services account; where you are creating an analysis.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -2852,7 +2918,7 @@ }, "Permissions":{ "shape":"ResourcePermissionList", - "documentation":"

A structure that describes the principals and the resource-level permissions on an analysis. You can use the Permissions structure to grant permissions by providing a list of AWS Identity and Access Management (IAM) action information for each principal listed by Amazon Resource Name (ARN).

To specify no permissions, omit Permissions.

" + "documentation":"

A structure that describes the principals and the resource-level permissions on an analysis. You can use the Permissions structure to grant permissions by providing a list of Identity and Access Management (IAM) action information for each principal listed by Amazon Resource Name (ARN).

To specify no permissions, omit Permissions.

" }, "SourceEntity":{ "shape":"AnalysisSourceEntity", @@ -2890,7 +2956,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -2916,7 +2982,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account where you want to create the dashboard.

", + "documentation":"

The ID of the Amazon Web Services account; where you want to create the dashboard.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -2940,7 +3006,7 @@ }, "SourceEntity":{ "shape":"DashboardSourceEntity", - "documentation":"

The entity that you are using as a source when you create the dashboard. In SourceEntity, you specify the type of object you're using as source. You can only create a dashboard from a template, so you use a SourceTemplate entity. If you need to create a dashboard from an analysis, first convert the analysis to a template by using the CreateTemplate API operation. For SourceTemplate, specify the Amazon Resource Name (ARN) of the source template. The SourceTemplateARN can contain any AWS Account and any QuickSight-supported AWS Region.

Use the DataSetReferences entity within SourceTemplate to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" + "documentation":"

The entity that you are using as a source when you create the dashboard. In SourceEntity, you specify the type of object you're using as source. You can only create a dashboard from a template, so you use a SourceTemplate entity. If you need to create a dashboard from an analysis, first convert the analysis to a template by using the CreateTemplate API operation. For SourceTemplate, specify the Amazon Resource Name (ARN) of the source template. The SourceTemplateARN can contain any Amazon Web Services account; and any QuickSight-supported Region;.

Use the DataSetReferences entity within SourceTemplate to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" }, "Tags":{ "shape":"TagList", @@ -2956,7 +3022,7 @@ }, "ThemeArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If you add a value for this field, it overrides the value that is used in the source entity. The theme ARN must exist in the same AWS account where you create the dashboard.

" + "documentation":"

The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If you add a value for this field, it overrides the value that is used in the source entity. The theme ARN must exist in the same Amazon Web Services account; where you create the dashboard.

" } } }, @@ -2986,7 +3052,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -3002,13 +3068,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account; ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

An ID for the dataset that you want to create. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

An ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "Name":{ "shape":"ResourceName", @@ -3042,6 +3108,10 @@ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the data that you want to create.

" }, + "RowLevelPermissionTagConfiguration":{ + "shape":"RowLevelPermissionTagConfiguration", + "documentation":"

The configuration of tags on a dataset to set row-level security. Row-level security tags are currently supported for anonymous embedding only.

" + }, "ColumnLevelPermissionRules":{ "shape":"ColumnLevelPermissionRuleList", "documentation":"

A set of one or more definitions of a ColumnLevelPermissionRule .

" @@ -3061,7 +3131,7 @@ }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset that you want to create. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.

" }, "IngestionArn":{ "shape":"Arn", @@ -3073,7 +3143,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -3093,13 +3163,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account; ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

An ID for the data source. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

An ID for the data source. This ID is unique per Region; for each Amazon Web Services account;.

" }, "Name":{ "shape":"ResourceName", @@ -3144,7 +3214,7 @@ }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.

" }, "CreationStatus":{ "shape":"ResourceStatus", @@ -3152,7 +3222,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -3299,7 +3369,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account; that the group is in. Currently, you use the ID for the Amazon Web Services account; that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -3320,7 +3390,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -3347,7 +3417,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account; that the group is in. Currently, you use the ID for the Amazon Web Services account; that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -3369,7 +3439,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -3390,13 +3460,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account where you want to assign an IAM policy to QuickSight users or groups.

", + "documentation":"

The ID of the Amazon Web Services account; where you want to assign an IAM policy to QuickSight users or groups.

", "location":"uri", "locationName":"AwsAccountId" }, "AssignmentName":{ "shape":"IAMPolicyAssignmentName", - "documentation":"

The name of the assignment, also called a rule. It must be unique within an AWS account.

" + "documentation":"

The name of the assignment, also called a rule. It must be unique within an Amazon Web Services account;.

" }, "AssignmentStatus":{ "shape":"AssignmentStatus", @@ -3423,7 +3493,7 @@ "members":{ "AssignmentName":{ "shape":"IAMPolicyAssignmentName", - "documentation":"

The name of the assignment. This name must be unique within the AWS account.

" + "documentation":"

The name of the assignment. This name must be unique within the Amazon Web Services account;.

" }, "AssignmentId":{ "shape":"String", @@ -3443,7 +3513,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -3474,7 +3544,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" } @@ -3497,7 +3567,7 @@ }, "RequestId":{ "shape":"string", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -3516,7 +3586,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you want to create the QuickSight namespace in.

", + "documentation":"

The ID for the Amazon Web Services account that you want to create the QuickSight namespace in.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -3547,7 +3617,7 @@ }, "CapacityRegion":{ "shape":"String", - "documentation":"

The AWS Region that you want to use for the free SPICE capacity for the new namespace. This is set to the region that you run CreateNamespace in.

" + "documentation":"

The Amazon Web Services Region that you want to use for the free SPICE capacity for the new namespace. This is set to the region that you run CreateNamespace in.

" }, "CreationStatus":{ "shape":"NamespaceStatus", @@ -3559,7 +3629,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -3579,7 +3649,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template that you creating an alias for.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template that you're creating an alias for.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -3591,7 +3661,7 @@ }, "AliasName":{ "shape":"AliasName", - "documentation":"

The name that you want to give to the template alias that you're creating. Don't start the alias name with the $ character. Alias names that start with $ are reserved by QuickSight.

", + "documentation":"

The name that you want to give to the template alias that you're creating. Don't start the alias name with the $ character. Alias names that start with $ are reserved by Amazon QuickSight.

", "location":"uri", "locationName":"AliasName" }, @@ -3615,7 +3685,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -3629,13 +3699,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, "TemplateId":{ "shape":"RestrictiveResourceId", - "documentation":"

An ID for the template that you want to create. This template is unique per AWS Region in each AWS account.

", + "documentation":"

An ID for the template that you want to create. This template is unique per Amazon Web Services Region in each Amazon Web Services account.

", "location":"uri", "locationName":"TemplateId" }, @@ -3649,7 +3719,7 @@ }, "SourceEntity":{ "shape":"TemplateSourceEntity", - "documentation":"

The entity that you are using as a source when you create the template. In SourceEntity, you specify the type of object you're using as source: SourceTemplate for a template or SourceAnalysis for an analysis. Both of these require an Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of the source template. For SourceAnalysis, specify the ARN of the source analysis. The SourceTemplate ARN can contain any AWS Account and any QuickSight-supported AWS Region.

Use the DataSetReferences entity within SourceTemplate or SourceAnalysis to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" + "documentation":"

The entity that you are using as a source when you create the template. In SourceEntity, you specify the type of object you're using as source: SourceTemplate for a template or SourceAnalysis for an analysis. Both of these require an Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of the source template. For SourceAnalysis, specify the ARN of the source analysis. The SourceTemplate ARN can contain any Amazon Web Services account and any QuickSight-supported Amazon Web Services Region.

Use the DataSetReferences entity within SourceTemplate or SourceAnalysis to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" }, "Tags":{ "shape":"TagList", @@ -3687,7 +3757,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -3702,7 +3772,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme for the new theme alias.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme for the new theme alias.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -3738,7 +3808,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -3754,13 +3824,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account where you want to store the new theme.

", + "documentation":"

The ID of the Amazon Web Services account where you want to store the new theme.

", "location":"uri", "locationName":"AwsAccountId" }, "ThemeId":{ "shape":"RestrictiveResourceId", - "documentation":"

An ID for the theme that you want to create. The theme ID is unique per AWS Region in each AWS account.

", + "documentation":"

An ID for the theme that you want to create. The theme ID is unique per Amazon Web Services Region in each Amazon Web Services account.

", "location":"uri", "locationName":"ThemeId" }, @@ -3816,7 +3886,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -4229,6 +4299,10 @@ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the dataset.

" }, + "RowLevelPermissionTagConfiguration":{ + "shape":"RowLevelPermissionTagConfiguration", + "documentation":"

The element you can use to define tags for row-level security.

" + }, "ColumnLevelPermissionRules":{ "shape":"ColumnLevelPermissionRuleList", "documentation":"

A set of one or more definitions of a ColumnLevelPermissionRule .

" @@ -4340,6 +4414,10 @@ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the dataset.

" }, + "RowLevelPermissionTagConfigurationApplied":{ + "shape":"Boolean", + "documentation":"

Whether or not the row level permission tags are applied.

" + }, "ColumnLevelPermissionRulesApplied":{ "shape":"Boolean", "documentation":"

A value that indicates if the dataset has column level permission configured.

" @@ -4360,7 +4438,7 @@ }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "Name":{ "shape":"ResourceName", @@ -4472,7 +4550,7 @@ }, "AwsIotAnalyticsParameters":{ "shape":"AwsIotAnalyticsParameters", - "documentation":"

AWS IoT Analytics parameters.

" + "documentation":"

Amazon Web Services IoT Analytics parameters.

" }, "JiraParameters":{ "shape":"JiraParameters", @@ -4628,7 +4706,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you want to delete QuickSight customizations from in this AWS Region.

", + "documentation":"

The ID for the Amazon Web Services account that you want to delete QuickSight customizations from in this Amazon Web Services Region.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -4645,7 +4723,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -4663,7 +4741,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account where you want to delete an analysis.

", + "documentation":"

The ID of the Amazon Web Services account where you want to delete an analysis.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -4709,7 +4787,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -4722,7 +4800,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the dashboard that you're deleting.

", + "documentation":"

The ID of the Amazon Web Services account that contains the dashboard that you're deleting.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -4758,7 +4836,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -4771,13 +4849,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset that you want to create. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSetId" } @@ -4792,11 +4870,11 @@ }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset that you want to create. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -4814,13 +4892,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSourceId" } @@ -4835,11 +4913,11 @@ }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -4962,7 +5040,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -4979,7 +5057,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5004,7 +5082,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5021,7 +5099,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5040,7 +5118,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID where you want to delete the IAM policy assignment.

", + "documentation":"

The Amazon Web Services account ID where you want to delete the IAM policy assignment.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5067,7 +5145,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5085,7 +5163,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you want to delete the QuickSight namespace from.

", + "documentation":"

The ID for the Amazon Web Services account that you want to delete the QuickSight namespace from.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5102,7 +5180,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5121,7 +5199,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the item to delete.

", + "documentation":"

The ID of the Amazon Web Services account that contains the item to delete.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5161,7 +5239,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -5174,7 +5252,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template that you're deleting.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template that you're deleting.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5197,7 +5275,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Arn":{ "shape":"Arn", @@ -5224,7 +5302,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme alias to delete.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme alias to delete.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5255,7 +5333,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5277,7 +5355,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme that you're deleting.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme that you're deleting.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5304,7 +5382,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5333,7 +5411,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5351,7 +5429,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5376,7 +5454,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5393,7 +5471,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5413,7 +5491,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you want to describe QuickSight customizations for.

", + "documentation":"

The ID for the Amazon Web Services account that you want to describe QuickSight customizations for.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5436,11 +5514,11 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the customization that's associated with this AWS account.

" + "documentation":"

The Amazon Resource Name (ARN) of the customization that's associated with this Amazon Web Services account.

" }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you're describing.

" + "documentation":"

The ID for the Amazon Web Services account that you're describing.

" }, "Namespace":{ "shape":"Namespace", @@ -5448,11 +5526,11 @@ }, "AccountCustomization":{ "shape":"AccountCustomization", - "documentation":"

The QuickSight customizations that exist in the current AWS Region.

" + "documentation":"

The QuickSight customizations that exist in the current Amazon Web Services Region.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5467,7 +5545,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that contains the settings that you want to list.

", + "documentation":"

The ID for the Amazon Web Services account that contains the settings that you want to list.

", "location":"uri", "locationName":"AwsAccountId" } @@ -5478,11 +5556,11 @@ "members":{ "AccountSettings":{ "shape":"AccountSettings", - "documentation":"

The QuickSight settings for this AWS account. This information includes the edition of Amazon QuickSight that you subscribed to (Standard or Enterprise) and the notification email for the QuickSight subscription. In the QuickSight console, the QuickSight subscription is sometimes referred to as a QuickSight \"account\" even though it's technically not an account by itself. Instead, it's a subscription to the QuickSight service for your AWS account. The edition that you subscribe to applies to QuickSight in every AWS Region where you use it.

" + "documentation":"

The QuickSight settings for this Amazon Web Services account. This information includes the edition of Amazon QuickSight that you subscribed to (Standard or Enterprise) and the notification email for the QuickSight subscription. In the QuickSight console, the QuickSight subscription is sometimes referred to as a QuickSight \"account\" even though it's technically not an account by itself. Instead, it's a subscription to the QuickSight service for your Amazon Web Services account. The edition that you subscribe to applies to QuickSight in every Amazon Web Services Region where you use it.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5500,7 +5578,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the analysis whose permissions you're describing. You must be using the AWS account that the analysis is in.

", + "documentation":"

The ID of the Amazon Web Services account that contains the analysis whose permissions you're describing. You must be using the Amazon Web Services account that the analysis is in.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5534,7 +5612,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -5547,7 +5625,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the analysis. You must be using the AWS account that the analysis is in.

", + "documentation":"

The ID of the Amazon Web Services account that contains the analysis. You must be using the Amazon Web Services account that the analysis is in.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5573,7 +5651,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -5586,7 +5664,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the dashboard that you're describing permissions for.

", + "documentation":"

The ID of the Amazon Web Services account that contains the dashboard that you're describing permissions for.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5620,7 +5698,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -5633,7 +5711,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the dashboard that you're describing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the dashboard that you're describing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -5671,7 +5749,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -5684,13 +5762,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset that you want to create. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSetId" } @@ -5705,7 +5783,7 @@ }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset that you want to create. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "Permissions":{ "shape":"ResourcePermissionList", @@ -5713,7 +5791,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5731,13 +5809,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset that you want to create. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSetId" } @@ -5752,7 +5830,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5770,13 +5848,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSourceId" } @@ -5791,7 +5869,7 @@ }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "Permissions":{ "shape":"ResourcePermissionList", @@ -5799,7 +5877,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5817,13 +5895,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSourceId" } @@ -5838,7 +5916,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -5996,7 +6074,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6017,7 +6095,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -6036,7 +6114,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the assignment that you want to describe.

", + "documentation":"

The ID of the Amazon Web Services account that contains the assignment that you want to describe.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6063,7 +6141,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -6082,7 +6160,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6109,7 +6187,7 @@ }, "RequestId":{ "shape":"string", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -6127,7 +6205,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that contains the QuickSight namespace that you want to describe.

", + "documentation":"

The ID for the Amazon Web Services account that contains the QuickSight namespace that you want to describe.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6144,11 +6222,11 @@ "members":{ "Namespace":{ "shape":"NamespaceInfoV2", - "documentation":"

The information about the namespace that you're describing. The response includes the namespace ARN, name, AWS Region, creation status, and identity store. DescribeNamespace also works for namespaces that are in the process of being created. For incomplete namespaces, this API operation lists the namespace error types and messages associated with the creation process.

" + "documentation":"

The information about the namespace that you're describing. The response includes the namespace ARN, name, Amazon Web Services Region, creation status, and identity store. DescribeNamespace also works for namespaces that are in the process of being created. For incomplete namespaces, this API operation lists the namespace error types and messages associated with the creation process.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -6167,7 +6245,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template alias that you're describing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template alias that you're describing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6199,7 +6277,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -6212,7 +6290,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template that you're describing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template that you're describing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6241,7 +6319,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -6259,7 +6337,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template that you're describing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template that you're describing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6297,7 +6375,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -6311,7 +6389,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme alias that you're describing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme alias that you're describing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6343,7 +6421,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -6356,7 +6434,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme that you're describing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme that you're describing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6385,7 +6463,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -6403,7 +6481,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAndAccountId", - "documentation":"

The ID of the AWS account that contains the theme that you're describing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme that you're describing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6441,7 +6519,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -6461,7 +6539,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6482,7 +6560,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -6502,7 +6580,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

The domain specified isn't on the allow list. All domains for embedded dashboards must be added to the approved list by an Amazon QuickSight admin.

", @@ -6533,6 +6611,11 @@ "type":"string", "sensitive":true }, + "EntryPath":{ + "type":"string", + "max":1000, + "min":1 + }, "EntryPoint":{ "type":"string", "max":1000, @@ -6764,6 +6847,117 @@ "type":"string", "enum":["SHARED"] }, + "GenerateEmbedUrlForAnonymousUserRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace", + "AuthorizedResourceArns", + "ExperienceConfiguration" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID for the Amazon Web Services account that contains the dashboard that you're embedding.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "SessionLifetimeInMinutes":{ + "shape":"SessionLifetimeInMinutes", + "documentation":"

How many minutes the session is valid. The session lifetime must be in [15-600] minutes range.

" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

The Amazon QuickSight namespace that the anonymous user virtually belongs to. If you are not using an Amazon QuickSight custom namespace, set this to default.

" + }, + "SessionTags":{ + "shape":"SessionTagList", + "documentation":"

The session tags used for row-level security. Before you use this parameter, make sure that you have configured the relevant datasets using the DataSet$RowLevelPermissionTagConfiguration parameter so that session tags can be used to provide row-level security.

These are not the tags used for the Amazon Web Services resource tagging feature. For more information, see Using Row-Level Security (RLS) with Tags.

" + }, + "AuthorizedResourceArns":{ + "shape":"ArnList", + "documentation":"

The Amazon Resource Names for the Amazon QuickSight resources that the user is authorized to access during the lifetime of the session. If you choose Dashboard embedding experience, pass the list of dashboard ARNs in the account that you want the user to be able to view.

" + }, + "ExperienceConfiguration":{ + "shape":"AnonymousUserEmbeddingExperienceConfiguration", + "documentation":"

The configuration of the experience you are embedding.

" + } + } + }, + "GenerateEmbedUrlForAnonymousUserResponse":{ + "type":"structure", + "required":[ + "EmbedUrl", + "Status", + "RequestId" + ], + "members":{ + "EmbedUrl":{ + "shape":"EmbeddingUrl", + "documentation":"

The embed URL for the dashboard.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + } + } + }, + "GenerateEmbedUrlForRegisteredUserRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "UserArn", + "ExperienceConfiguration" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID for the Amazon Web Services account that contains the dashboard that you're embedding.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "SessionLifetimeInMinutes":{ + "shape":"SessionLifetimeInMinutes", + "documentation":"

How many minutes the session is valid. The session lifetime must be in [15-600] minutes range.

" + }, + "UserArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name for the registered user.

" + }, + "ExperienceConfiguration":{ + "shape":"RegisteredUserEmbeddingExperienceConfiguration", + "documentation":"

The experience you are embedding. For registered users, you can embed Amazon QuickSight dashboards or the entire Amazon QuickSight console.

" + } + } + }, + "GenerateEmbedUrlForRegisteredUserResponse":{ + "type":"structure", + "required":[ + "EmbedUrl", + "Status", + "RequestId" + ], + "members":{ + "EmbedUrl":{ + "shape":"EmbeddingUrl", + "documentation":"

The embed URL for the Amazon QuickSight dashboard or console.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + } + } + }, "GeoSpatialColumnGroup":{ "type":"structure", "required":[ @@ -6813,13 +7007,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that contains the dashboard that you're embedding.

", + "documentation":"

The ID for the Amazon Web Services account that contains the dashboard that you're embedding.

", "location":"uri", "locationName":"AwsAccountId" }, "DashboardId":{ "shape":"RestrictiveResourceId", - "documentation":"

The ID for the dashboard, also added to the AWS Identity and Access Management (IAM) policy.

", + "documentation":"

The ID for the dashboard, also added to the Identity and Access Management (IAM) policy.

", "location":"uri", "locationName":"DashboardId" }, @@ -6861,7 +7055,7 @@ }, "Namespace":{ "shape":"Namespace", - "documentation":"

The QuickSight namespace that contains the dashboard IDs in this request. If you're not using a custom namespace, set this to \"default\".

", + "documentation":"

The Amazon QuickSight namespace that the user virtually belongs to. If you are not using an Amazon QuickSight custom namespace, set this to default.

", "location":"querystring", "locationName":"namespace" }, @@ -6887,7 +7081,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } }, "documentation":"

Output returned from the GetDashboardEmbedUrl operation.

" @@ -6898,7 +7092,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account associated with your QuickSight subscription.

", + "documentation":"

The ID for the Amazon Web Services account associated with your QuickSight subscription.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -6916,7 +7110,7 @@ }, "UserArn":{ "shape":"Arn", - "documentation":"

The Amazon QuickSight user's Amazon Resource Name (ARN), for use with QUICKSIGHT identity type. You can use this for any type of Amazon QuickSight users in your account (readers, authors, or admins). They need to be authenticated as one of the following:

  1. Active Directory (AD) users or group members

  2. Invited nonfederated users

  3. AWS Identity and Access Management (IAM) users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM federation

Omit this parameter for users in the third group, IAM users and IAM role-based sessions.

", + "documentation":"

The Amazon QuickSight user's Amazon Resource Name (ARN), for use with QUICKSIGHT identity type. You can use this for any type of Amazon QuickSight users in your account (readers, authors, or admins). They need to be authenticated as one of the following:

  1. Active Directory (AD) users or group members

  2. Invited nonfederated users

  3. Identity and Access Management (IAM) users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM federation

Omit this parameter for users in the third group, IAM users and IAM role-based sessions.

", "location":"querystring", "locationName":"user-arn" } @@ -6936,7 +7130,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -7025,7 +7219,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

" + "documentation":"

The Amazon Web Services account ID.

" }, "AssignmentId":{ "shape":"String", @@ -7048,7 +7242,7 @@ "documentation":"

Assignment status.

" } }, - "documentation":"

An AWS Identity and Access Management (IAM) policy assignment.

" + "documentation":"

An Identity and Access Management (IAM) policy assignment.

" }, "IAMPolicyAssignmentName":{ "type":"string", @@ -7104,7 +7298,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

The identity type specified isn't supported. Supported identity types include IAM and QUICKSIGHT.

", @@ -7319,7 +7513,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

An internal failure occurred.

", @@ -7333,7 +7527,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

The NextToken value isn't valid.

", @@ -7346,7 +7540,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

One or more parameters has a value that isn't valid.

", @@ -7430,7 +7624,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

A limit is exceeded.

", @@ -7443,7 +7637,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the analyses.

", + "documentation":"

The ID of the Amazon Web Services account that contains the analyses.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -7480,7 +7674,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -7493,7 +7687,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the dashboard that you're listing versions for.

", + "documentation":"

The ID of the Amazon Web Services account that contains the dashboard that you're listing versions for.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -7536,7 +7730,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -7546,7 +7740,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the dashboards that you're listing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the dashboards that you're listing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -7570,7 +7764,7 @@ "members":{ "DashboardSummaryList":{ "shape":"DashboardSummaryList", - "documentation":"

A structure that contains all of the dashboards in your AWS account. This structure provides basic information about the dashboards.

" + "documentation":"

A structure that contains all of the dashboards in your Amazon Web Services account. This structure provides basic information about the dashboards.

" }, "NextToken":{ "shape":"String", @@ -7583,7 +7777,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -7593,7 +7787,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -7625,7 +7819,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -7640,7 +7834,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -7672,7 +7866,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -7813,7 +8007,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -7838,7 +8032,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -7856,7 +8050,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -7894,7 +8088,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -7913,7 +8107,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the assignments.

", + "documentation":"

The ID of the Amazon Web Services account that contains the assignments.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -7953,7 +8147,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "NextToken":{ "shape":"String", @@ -7975,7 +8169,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains these IAM policy assignments.

", + "documentation":"

The ID of the Amazon Web Services account that contains these IAM policy assignments.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8017,7 +8211,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -8047,7 +8241,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8073,7 +8267,7 @@ }, "RequestId":{ "shape":"string", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -8088,7 +8282,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that contains the QuickSight namespaces that you want to list.

", + "documentation":"

The ID for the Amazon Web Services account that contains the QuickSight namespaces that you want to list.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8112,7 +8306,7 @@ "members":{ "Namespaces":{ "shape":"Namespaces", - "documentation":"

The information about the namespaces in this AWS account. The response includes the namespace ARN, name, AWS Region, notification email address, creation status, and identity store.

" + "documentation":"

The information about the namespaces in this Amazon Web Services account. The response includes the namespace ARN, name, Amazon Web Services Region, notification email address, creation status, and identity store.

" }, "NextToken":{ "shape":"String", @@ -8120,7 +8314,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -8150,7 +8344,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -8168,7 +8362,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template aliases that you're listing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template aliases that you're listing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8207,7 +8401,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "NextToken":{ "shape":"String", @@ -8224,7 +8418,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the templates that you're listing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the templates that you're listing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8267,7 +8461,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -8277,7 +8471,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the templates that you're listing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the templates that you're listing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8314,7 +8508,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -8327,7 +8521,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme aliases that you're listing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme aliases that you're listing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8366,7 +8560,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "NextToken":{ "shape":"String", @@ -8383,7 +8577,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the themes that you're listing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the themes that you're listing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8426,7 +8620,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -8436,7 +8630,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the themes that you're listing.

", + "documentation":"

The ID of the Amazon Web Services account that contains the themes that you're listing.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8455,7 +8649,7 @@ }, "Type":{ "shape":"ThemeType", - "documentation":"

The type of themes that you want to list. Valid options include the following:

  • ALL (default)- Display all existing themes.

  • CUSTOM - Display only the themes created by people using Amazon QuickSight.

  • QUICKSIGHT - Display only the starting themes defined by QuickSight.

", + "documentation":"

The type of themes that you want to list. Valid options include the following:

  • ALL (default)- Display all existing themes.

  • CUSTOM - Display only the themes created by people using Amazon QuickSight.

  • QUICKSIGHT - Display only the starting themes defined by Amazon QuickSight.

", "location":"querystring", "locationName":"type" } @@ -8479,7 +8673,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -8499,7 +8693,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The Amazon Web Services account ID that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8537,7 +8731,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -8555,7 +8749,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -8593,7 +8787,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -8802,7 +8996,7 @@ }, "CapacityRegion":{ "shape":"String", - "documentation":"

The namespace AWS Region.

" + "documentation":"

The namespace Amazon Web Services Region.

" }, "CreationStatus":{ "shape":"NamespaceStatus", @@ -8994,7 +9188,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

One or more preconditions aren't met.

", @@ -9081,7 +9275,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

The user with the provided name isn't found. This error can happen in any operation that requires finding a user based on a provided user name, such as DeleteUser, DescribeUser, and so on.

", @@ -9166,7 +9360,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -9182,7 +9376,7 @@ }, "CustomPermissionsName":{ "shape":"RoleName", - "documentation":"

(Enterprise edition only) The name of the custom permissions profile that you want to assign to this user. Customized permissions allows you to control a user's access by restricting access the following operations:

  • Create and update data sources

  • Create and update datasets

  • Create and update email reports

  • Subscribe to email reports

To add custom permissions to an existing user, use UpdateUser instead.

A set of custom permissions includes any combination of these restrictions. Currently, you need to create the profile names for custom permission sets by using the QuickSight console. Then, you use the RegisterUser API operation to assign the named set of permissions to a QuickSight user.

QuickSight custom permissions are applied through IAM policies. Therefore, they override the permissions typically granted by assigning QuickSight users to one of the default security cohorts in QuickSight (admin, author, reader).

This feature is available only to QuickSight Enterprise edition subscriptions that use SAML 2.0-Based Federation for Single Sign-On (SSO).

" + "documentation":"

(Enterprise edition only) The name of the custom permissions profile that you want to assign to this user. Customized permissions allow you to control a user's access by restricting access to the following operations:

  • Create and update data sources

  • Create and update datasets

  • Create and update email reports

  • Subscribe to email reports

To add custom permissions to an existing user, use UpdateUser instead.

A set of custom permissions includes any combination of these restrictions. Currently, you need to create the profile names for custom permission sets by using the QuickSight console. Then, you use the RegisterUser API operation to assign the named set of permissions to a QuickSight user.

QuickSight custom permissions are applied through IAM policies. Therefore, they override the permissions typically granted by assigning QuickSight users to one of the default security cohorts in QuickSight (admin, author, reader).

This feature is available only to QuickSight Enterprise edition subscriptions.

" }, "ExternalLoginFederationProviderType":{ "shape":"String", @@ -9211,7 +9405,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -9220,6 +9414,41 @@ } } }, + "RegisteredUserDashboardEmbeddingConfiguration":{ + "type":"structure", + "required":["InitialDashboardId"], + "members":{ + "InitialDashboardId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The dashboard ID for the dashboard that you want the user to see first. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders this dashboard if the user has permissions to view it.

If the user does not have permission to view this dashboard, they see a permissions error message.

" + } + }, + "documentation":"

Information about the dashboard you want to embed.

" + }, + "RegisteredUserEmbeddingExperienceConfiguration":{ + "type":"structure", + "members":{ + "Dashboard":{ + "shape":"RegisteredUserDashboardEmbeddingConfiguration", + "documentation":"

The configuration details for providing a dashboard embedding experience.

" + }, + "QuickSightConsole":{ + "shape":"RegisteredUserQuickSightConsoleEmbeddingConfiguration", + "documentation":"

The configuration details for providing an Amazon QuickSight console embedding experience. This can be used along with custom permissions to restrict access to certain features. For more information, see Customizing Access to the Amazon QuickSight Console in the Amazon QuickSight User Guide.

Use GenerateEmbedUrlForRegisteredUser where you want to provide an authoring portal that allows users to create data sources, datasets, analyses, and dashboards. The users who access an embedded Amazon QuickSight console need to belong to the author or admin security cohort. If you want to restrict permissions to some of these features, add a custom permissions profile to the user with the UpdateUser API operation. Use the RegisterUser API operation to add a new user with a custom permission profile attached. For more information, see the following sections in the Amazon QuickSight User Guide:

For more information about the high-level steps for embedding and for an interactive demo of the ways you can customize embedding, visit the Amazon QuickSight Developer Portal.

" + } + }, + "documentation":"

The type of experience you want to embed. For registered users, you can embed an Amazon QuickSight dashboard or the Amazon QuickSight console.

Exactly one of the experience configurations is required. You can choose Dashboard or QuickSightConsole. You cannot choose more than one experience configuration.

" + }, + "RegisteredUserQuickSightConsoleEmbeddingConfiguration":{ + "type":"structure", + "members":{ + "InitialPath":{ + "shape":"EntryPath", + "documentation":"

The initial URL path for the Amazon QuickSight console. InitialPath is required.

The entry point URL is constrained to the following paths:

  • /start

  • /start/analyses

  • /start/dashboards

  • /start/favorites

  • /dashboards/DashboardId. DashboardId is the actual ID key from the Amazon QuickSight console URL of the dashboard.

  • /analyses/AnalysisId. AnalysisId is the actual ID key from the Amazon QuickSight console URL of the analysis.

" + } + }, + "documentation":"

Information about the Amazon QuickSight console that you want to embed.

" + }, "RelationalTable":{ "type":"structure", "required":[ @@ -9292,7 +9521,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

The resource specified already exists.

", @@ -9315,7 +9544,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

One or more resources can't be found.

", @@ -9331,7 +9560,7 @@ "members":{ "Principal":{ "shape":"Principal", - "documentation":"

The Amazon Resource Name (ARN) of the principal. This can be one of the following:

  • The ARN of an Amazon QuickSight user or group associated with a data source or dataset. (This is common.)

  • The ARN of an Amazon QuickSight user, group, or namespace associated with an analysis, dashboard, template, or theme. (This is common.)

  • The ARN of an AWS account root: This is an IAM ARN rather than a QuickSight ARN. Use this option only to share resources (templates) across AWS accounts. (This is less common.)

" + "documentation":"

The Amazon Resource Name (ARN) of the principal. This can be one of the following:

  • The ARN of an Amazon QuickSight user or group associated with a data source or dataset. (This is common.)

  • The ARN of an Amazon QuickSight user, group, or namespace associated with an analysis, dashboard, template, or theme. (This is common.)

  • The ARN of an Amazon Web Services account root: This is an IAM ARN rather than a QuickSight ARN. Use this option only to share resources (templates) across Amazon Web Services accounts. (This is less common.)

" }, "Actions":{ "shape":"ActionList", @@ -9368,7 +9597,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

This resource is currently unavailable.

", @@ -9384,7 +9613,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the analysis.

", + "documentation":"

The ID of the Amazon Web Services account that contains the analysis.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -9414,7 +9643,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -9474,6 +9703,10 @@ "FormatVersion":{ "shape":"RowLevelPermissionFormatVersion", "documentation":"

The user or group rules associated with the dataset that contains permissions for RLS.

By default, FormatVersion is VERSION_1. When FormatVersion is VERSION_1, UserName and GroupName are required. When FormatVersion is VERSION_2, UserARN and GroupARN are required, and Namespace must not exist.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the row-level security permission dataset. If enabled, the status is ENABLED. If disabled, the status is DISABLED.

" } }, "documentation":"

Information about a dataset that contains permissions for row-level security (RLS). The permissions dataset maps fields to users or groups. For more information, see Using Row-Level Security (RLS) to Restrict Access to a Dataset in the Amazon QuickSight User Guide.

The option to deny permissions by setting PermissionPolicy to DENY_ACCESS is not supported for new RLS datasets.

" @@ -9492,6 +9725,57 @@ "DENY_ACCESS" ] }, + "RowLevelPermissionTagConfiguration":{ + "type":"structure", + "required":["TagRules"], + "members":{ + "Status":{ + "shape":"Status", + "documentation":"

The status of row-level security tags. If enabled, the status is ENABLED. If disabled, the status is DISABLED.

" + }, + "TagRules":{ + "shape":"RowLevelPermissionTagRuleList", + "documentation":"

A set of rules associated with row-level security, such as the tag names and columns that they are assigned to.

" + } + }, + "documentation":"

The configuration of tags on a dataset to set row-level security.

" + }, + "RowLevelPermissionTagDelimiter":{ + "type":"string", + "max":10 + }, + "RowLevelPermissionTagRule":{ + "type":"structure", + "required":[ + "TagKey", + "ColumnName" + ], + "members":{ + "TagKey":{ + "shape":"SessionTagKey", + "documentation":"

The unique key for a tag.

" + }, + "ColumnName":{ + "shape":"String", + "documentation":"

The column name that a tag key is assigned to.

" + }, + "TagMultiValueDelimiter":{ + "shape":"RowLevelPermissionTagDelimiter", + "documentation":"

A string that you want to use to delimit the values when you pass the values at run time. For example, you can delimit the values with a comma.

" + }, + "MatchAllValue":{ + "shape":"SessionTagValue", + "documentation":"

A string that you want to use to filter by all the values in a column in the dataset and don’t want to list the values one by one. For example, you can use an asterisk as your match all value.

" + } + }, + "documentation":"

A set of rules associated with a tag.

" + }, + "RowLevelPermissionTagRuleList":{ + "type":"list", + "member":{"shape":"RowLevelPermissionTagRule"}, + "max":50, + "min":1 + }, "S3Bucket":{ "type":"string", "max":1024, @@ -9508,7 +9792,7 @@ "members":{ "ManifestFileLocation":{ "shape":"ManifestFileLocation", - "documentation":"

Location of the Amazon S3 manifest file. This is NULL if the manifest file was uploaded in the console.

" + "documentation":"

Location of the Amazon S3 manifest file. This is NULL if the manifest file was uploaded into QuickSight.

" } }, "documentation":"

S3 parameters.

" @@ -9544,7 +9828,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the analyses that you're searching for.

", + "documentation":"

The ID of the Amazon Web Services account that contains the analyses that you're searching for.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -9581,7 +9865,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -9594,7 +9878,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the user whose dashboards you're searching for.

", + "documentation":"

The ID of the Amazon Web Services account that contains the user whose dashboards you're searching for.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -9631,7 +9915,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -9707,13 +9991,48 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

The number of minutes specified for the lifetime of a session isn't valid. The session lifetime must be 15-600 minutes.

", "error":{"httpStatusCode":400}, "exception":true }, + "SessionTag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"SessionTagKey", + "documentation":"

The key for the tag.

" + }, + "Value":{ + "shape":"SessionTagValue", + "documentation":"

The value that you want to assign the tag.

" + } + }, + "documentation":"

The key-value pair used for the row-level security tags feature.

" + }, + "SessionTagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "SessionTagList":{ + "type":"list", + "member":{"shape":"SessionTag"}, + "max":50, + "min":1 + }, + "SessionTagValue":{ + "type":"string", + "max":256, + "min":1, + "sensitive":true + }, "Sheet":{ "type":"structure", "members":{ @@ -9726,7 +10045,7 @@ "documentation":"

The name of a sheet. This name is displayed on the sheet's tab in the QuickSight console.

" } }, - "documentation":"

A sheet, which is an object that contains a set of visuals that are viewed together on one page in the Amazon QuickSight console. Every analysis and dashboard contains at least one sheet. Each sheet contains at least one visualization widget, for example a chart, pivot table, or narrative insight. Sheets can be associated with other components, such as controls, filters, and so on.

" + "documentation":"

A sheet, which is an object that contains a set of visuals that are viewed together on one page in Amazon QuickSight. Every analysis and dashboard contains at least one sheet. Each sheet contains at least one visualization widget, for example a chart, pivot table, or narrative insight. Sheets can be associated with other components, such as controls, filters, and so on.

" }, "SheetControlsOption":{ "type":"structure", @@ -9841,6 +10160,13 @@ }, "documentation":"

Secure Socket Layer (SSL) properties that apply when QuickSight connects to your underlying data source.

" }, + "Status":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "StatusCode":{"type":"integer"}, "String":{"type":"string"}, "StringList":{ @@ -9901,7 +10227,7 @@ }, "Tags":{ "shape":"ColumnTagList", - "documentation":"

The dataset column tag, currently only used for geospatial type tagging. .

This is not tags for the AWS tagging feature. .

" + "documentation":"

The dataset column tag, currently only used for geospatial type tagging.

This is not tags for the Amazon Web Services tagging feature.

" } }, "documentation":"

A transform operation that tags a column with additional information.

" @@ -9947,7 +10273,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -9978,7 +10304,7 @@ }, "TemplateId":{ "shape":"RestrictiveResourceId", - "documentation":"

The ID for the template. This is unique per AWS Region for each AWS account.

" + "documentation":"

The ID for the template. This is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "LastUpdatedTime":{ "shape":"Timestamp", @@ -9989,7 +10315,7 @@ "documentation":"

Time when this was created.

" } }, - "documentation":"

A template object. A template is an entity in QuickSight that encapsulates the metadata required to create an analysis and that you can use to create a dashboard. A template adds a layer of abstraction by using placeholders to replace the dataset associated with an analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.

You can share templates across AWS accounts by allowing users in other AWS accounts to create a template or a dashboard from an existing template.

" + "documentation":"

A template object. A template is an entity in QuickSight that encapsulates the metadata required to create an analysis and that you can use to create a dashboard. A template adds a layer of abstraction by using placeholders to replace the dataset associated with an analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.

You can share templates across Amazon Web Services accounts by allowing users in other Amazon Web Services accounts to create a template or a dashboard from an existing template.

" }, "TemplateAlias":{ "type":"structure", @@ -10099,7 +10425,7 @@ }, "TemplateId":{ "shape":"RestrictiveResourceId", - "documentation":"

The ID of the template. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID of the template. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "Name":{ "shape":"TemplateName", @@ -10341,7 +10667,7 @@ }, "ThemeId":{ "shape":"RestrictiveResourceId", - "documentation":"

The ID of the theme. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID of the theme. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "LatestVersionNumber":{ "shape":"VersionNumber", @@ -10446,7 +10772,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

Access is throttled.

", @@ -10616,10 +10942,10 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, - "documentation":"

This error indicates that you are calling an embedding operation in Amazon QuickSight without the required pricing plan on your AWS account. Before you can use embedding for anonymous users, a QuickSight administrator needs to add capacity pricing to QuickSight. You can do this on the Manage QuickSight page.

After capacity pricing is added, you can use the GetDashboardEmbedUrl API operation with the --identity-type ANONYMOUS option.

", + "documentation":"

This error indicates that you are calling an embedding operation in Amazon QuickSight without the required pricing plan on your Amazon Web Services account. Before you can use embedding for anonymous users, a QuickSight administrator needs to add capacity pricing to QuickSight. You can do this on the Manage QuickSight page.

After capacity pricing is added, you can use the GetDashboardEmbedUrl API operation with the --identity-type ANONYMOUS option.

", "error":{"httpStatusCode":403}, "exception":true }, @@ -10629,7 +10955,7 @@ "Message":{"shape":"String"}, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The Amazon Web Services request ID for this request.

" } }, "documentation":"

This error indicates that you are calling an operation on an Amazon QuickSight subscription where the edition doesn't include support for that operation. Amazon QuickSight currently has Standard Edition and Enterprise Edition. Not every operation and capability is available in every edition.

", @@ -10662,7 +10988,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -10680,7 +11006,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you want to update QuickSight customizations for.

", + "documentation":"

The ID for the Amazon Web Services account that you want to update QuickSight customizations for.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -10692,7 +11018,7 @@ }, "AccountCustomization":{ "shape":"AccountCustomization", - "documentation":"

The QuickSight customizations you're updating in the current AWS Region.

" + "documentation":"

The QuickSight customizations you're updating in the current Amazon Web Services Region.

" } } }, @@ -10701,11 +11027,11 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for the updated customization for this AWS account.

" + "documentation":"

The Amazon Resource Name (ARN) for the updated customization for this Amazon Web Services account.

" }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that you want to update QuickSight customizations for.

" + "documentation":"

The ID for the Amazon Web Services account that you want to update QuickSight customizations for.

" }, "Namespace":{ "shape":"Namespace", @@ -10713,11 +11039,11 @@ }, "AccountCustomization":{ "shape":"AccountCustomization", - "documentation":"

The QuickSight customizations you're updating in the current AWS Region.

" + "documentation":"

The QuickSight customizations you're updating in the current Amazon Web Services Region.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -10735,17 +11061,17 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that contains the QuickSight settings that you want to list.

", + "documentation":"

The ID for the Amazon Web Services account that contains the QuickSight settings that you want to list.

", "location":"uri", "locationName":"AwsAccountId" }, "DefaultNamespace":{ "shape":"Namespace", - "documentation":"

The default namespace for this AWS account. Currently, the default is default. AWS Identity and Access Management (IAM) users that register for the first time with QuickSight provide an email that becomes associated with the default namespace.

" + "documentation":"

The default namespace for this Amazon Web Services account. Currently, the default is default. Identity and Access Management (IAM) users that register for the first time with QuickSight provide an email that becomes associated with the default namespace.

" }, "NotificationEmail":{ "shape":"String", - "documentation":"

The email address that you want QuickSight to send notifications to regarding your AWS account or QuickSight subscription.

" + "documentation":"

The email address that you want QuickSight to send notifications to regarding your Amazon Web Services account or QuickSight subscription.

" } } }, @@ -10754,7 +11080,7 @@ "members":{ "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -10772,7 +11098,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the analysis whose permissions you're updating. You must be using the AWS account that the analysis is in.

", + "documentation":"

The ID of the Amazon Web Services account that contains the analysis whose permissions you're updating. You must be using the Amazon Web Services account that the analysis is in.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -10809,7 +11135,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -10829,7 +11155,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the analysis that you're updating.

", + "documentation":"

The ID of the Amazon Web Services account that contains the analysis that you're updating.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -10879,7 +11205,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -10892,7 +11218,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the dashboard whose permissions you're updating.

", + "documentation":"

The ID of the Amazon Web Services account that contains the dashboard whose permissions you're updating.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -10929,7 +11255,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -10948,7 +11274,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the dashboard that you're updating.

", + "documentation":"

The ID of the Amazon Web Services account that contains the dashboard that you're updating.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -10984,7 +11310,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -10999,7 +11325,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the dashboard that you're updating.

", + "documentation":"

The ID of the Amazon Web Services account that contains the dashboard that you're updating.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11015,7 +11341,7 @@ }, "SourceEntity":{ "shape":"DashboardSourceEntity", - "documentation":"

The entity that you are using as a source when you update the dashboard. In SourceEntity, you specify the type of object you're using as source. You can only update a dashboard from a template, so you use a SourceTemplate entity. If you need to update a dashboard from an analysis, first convert the analysis to a template by using the CreateTemplate API operation. For SourceTemplate, specify the Amazon Resource Name (ARN) of the source template. The SourceTemplate ARN can contain any AWS Account and any QuickSight-supported AWS Region.

Use the DataSetReferences entity within SourceTemplate to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" + "documentation":"

The entity that you are using as a source when you update the dashboard. In SourceEntity, you specify the type of object you're using as source. You can only update a dashboard from a template, so you use a SourceTemplate entity. If you need to update a dashboard from an analysis, first convert the analysis to a template by using the CreateTemplate API operation. For SourceTemplate, specify the Amazon Resource Name (ARN) of the source template. The SourceTemplate ARN can contain any Amazon Web Services account and any QuickSight-supported Amazon Web Services Region.

Use the DataSetReferences entity within SourceTemplate to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" }, "Parameters":{ "shape":"Parameters", @@ -11031,7 +11357,7 @@ }, "ThemeArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If you add a value for this field, it overrides the value that was originally associated with the entity. The theme ARN must exist in the same AWS account where you create the dashboard.

" + "documentation":"

The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If you add a value for this field, it overrides the value that was originally associated with the entity. The theme ARN must exist in the same Amazon Web Services account where you create the dashboard.

" } } }, @@ -11060,7 +11386,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -11073,13 +11399,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset whose permissions you want to update. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID for the dataset whose permissions you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSetId" }, @@ -11102,11 +11428,11 @@ }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset whose permissions you want to update. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID for the dataset whose permissions you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -11127,13 +11453,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset that you want to update. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID for the dataset that you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSetId" }, @@ -11165,6 +11491,10 @@ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the data you want to create.

" }, + "RowLevelPermissionTagConfiguration":{ + "shape":"RowLevelPermissionTagConfiguration", + "documentation":"

The configuration of tags on a dataset to set row-level security. Row-level security tags are currently supported for anonymous embedding only.

" + }, "ColumnLevelPermissionRules":{ "shape":"ColumnLevelPermissionRuleList", "documentation":"

A set of one or more definitions of a ColumnLevelPermissionRule .

" @@ -11180,7 +11510,7 @@ }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

The ID for the dataset that you want to create. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "IngestionArn":{ "shape":"Arn", @@ -11192,7 +11522,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -11210,13 +11540,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSourceId" }, @@ -11239,11 +11569,11 @@ }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -11262,13 +11592,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The AWS account ID.

", + "documentation":"

The Amazon Web Services account ID.

", "location":"uri", "locationName":"AwsAccountId" }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

", + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "location":"uri", "locationName":"DataSourceId" }, @@ -11303,7 +11633,7 @@ }, "DataSourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the data source. This ID is unique per AWS Region for each AWS account.

" + "documentation":"

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" }, "UpdateStatus":{ "shape":"ResourceStatus", @@ -11311,7 +11641,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -11442,7 +11772,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11463,7 +11793,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -11482,13 +11812,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the IAM policy assignment.

", + "documentation":"

The ID of the Amazon Web Services account that contains the IAM policy assignment.

", "location":"uri", "locationName":"AwsAccountId" }, "AssignmentName":{ "shape":"IAMPolicyAssignmentName", - "documentation":"

The name of the assignment, also called a rule. This name must be unique within an AWS account.

", + "documentation":"

The name of the assignment, also called a rule. This name must be unique within an Amazon Web Services account.

", "location":"uri", "locationName":"AssignmentName" }, @@ -11537,7 +11867,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -11562,7 +11892,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template alias that you're updating.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template alias that you're updating.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11598,7 +11928,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -11611,7 +11941,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11648,7 +11978,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -11667,7 +11997,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the template that you're updating.

", + "documentation":"

The ID of the Amazon Web Services account that contains the template that you're updating.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11679,7 +12009,7 @@ }, "SourceEntity":{ "shape":"TemplateSourceEntity", - "documentation":"

The entity that you are using as a source when you update the template. In SourceEntity, you specify the type of object you're using as source: SourceTemplate for a template or SourceAnalysis for an analysis. Both of these require an Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of the source template. For SourceAnalysis, specify the ARN of the source analysis. The SourceTemplate ARN can contain any AWS Account and any QuickSight-supported AWS Region.

Use the DataSetReferences entity within SourceTemplate or SourceAnalysis to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" + "documentation":"

The entity that you are using as a source when you update the template. In SourceEntity, you specify the type of object you're using as source: SourceTemplate for a template or SourceAnalysis for an analysis. Both of these require an Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of the source template. For SourceAnalysis, specify the ARN of the source analysis. The SourceTemplate ARN can contain any Amazon Web Services account and any QuickSight-supported Amazon Web Services Region.

Use the DataSetReferences entity within SourceTemplate or SourceAnalysis to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" }, "VersionDescription":{ "shape":"VersionDescription", @@ -11717,7 +12047,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -11732,7 +12062,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme alias that you're updating.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme alias that you're updating.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11768,7 +12098,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -11781,7 +12111,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11818,7 +12148,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -11837,7 +12167,7 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the theme that you're updating.

", + "documentation":"

The ID of the Amazon Web Services account that contains the theme that you're updating.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11853,7 +12183,7 @@ }, "BaseThemeId":{ "shape":"RestrictiveResourceId", - "documentation":"

The theme ID, defined by Amazon QuickSight, that a custom theme inherits from. All themes initially inherit from a default QuickSight theme.

" + "documentation":"

The theme ID, defined by Amazon QuickSight, that a custom theme inherits from. All themes initially inherit from a default Amazon QuickSight theme.

" }, "VersionDescription":{ "shape":"VersionDescription", @@ -11891,7 +12221,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" } } }, @@ -11913,7 +12243,7 @@ }, "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "location":"uri", "locationName":"AwsAccountId" }, @@ -11933,7 +12263,7 @@ }, "CustomPermissionsName":{ "shape":"RoleName", - "documentation":"

(Enterprise edition only) The name of the custom permissions profile that you want to assign to this user. Customized permissions allows you to control a user's access by restricting access the following operations:

  • Create and update data sources

  • Create and update datasets

  • Create and update email reports

  • Subscribe to email reports

A set of custom permissions includes any combination of these restrictions. Currently, you need to create the profile names for custom permission sets by using the QuickSight console. Then, you use the RegisterUser API operation to assign the named set of permissions to a QuickSight user.

QuickSight custom permissions are applied through IAM policies. Therefore, they override the permissions typically granted by assigning QuickSight users to one of the default security cohorts in QuickSight (admin, author, reader).

This feature is available only to QuickSight Enterprise edition subscriptions that use SAML 2.0-Based Federation for Single Sign-On (SSO).

" + "documentation":"

(Enterprise edition only) The name of the custom permissions profile that you want to assign to this user. Customized permissions allows you to control a user's access by restricting access the following operations:

  • Create and update data sources

  • Create and update datasets

  • Create and update email reports

  • Subscribe to email reports

A set of custom permissions includes any combination of these restrictions. Currently, you need to create the profile names for custom permission sets by using the QuickSight console. Then, you use the RegisterUser API operation to assign the named set of permissions to a QuickSight user.

QuickSight custom permissions are applied through IAM policies. Therefore, they override the permissions typically granted by assigning QuickSight users to one of the default security cohorts in QuickSight (admin, author, reader).

This feature is available only to QuickSight Enterprise edition subscriptions.

" }, "UnapplyCustomPermissions":{ "shape":"Boolean", @@ -11962,7 +12292,7 @@ }, "RequestId":{ "shape":"String", - "documentation":"

The AWS request ID for this operation.

" + "documentation":"

The Amazon Web Services request ID for this operation.

" }, "Status":{ "shape":"StatusCode", @@ -12107,5 +12437,5 @@ "string":{"type":"string"}, "timestamp":{"type":"timestamp"} }, - "documentation":"Amazon QuickSight API Reference

Amazon QuickSight is a fully managed, serverless business intelligence service for the AWS Cloud that makes it easy to extend data and insights to every user in your organization. This API reference contains documentation for a programming interface that you can use to manage Amazon QuickSight.

" + "documentation":"Amazon QuickSight API Reference

Amazon QuickSight is a fully managed, serverless business intelligence service for the Amazon Web Services Cloud that makes it easy to extend data and insights to every user in your organization. This API reference contains documentation for a programming interface that you can use to manage Amazon QuickSight.

" } diff --git a/services/ram/pom.xml b/services/ram/pom.xml index f16b02a69d82..e185f67c2bc2 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rds/pom.xml b/services/rds/pom.xml index b50c1034afb9..c3e4d31d2a2d 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index c028d6e9dde0..285d6f50c571 100755 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -3540,7 +3540,7 @@ }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The amount of storage (in gibibytes) to allocate for the DB instance.

Type: Integer

Amazon Aurora

Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

MySQL

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.

  • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

  • Magnetic storage (standard): Must be an integer from 5 to 3072.

MariaDB

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.

  • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

  • Magnetic storage (standard): Must be an integer from 5 to 3072.

PostgreSQL

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.

  • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

  • Magnetic storage (standard): Must be an integer from 5 to 3072.

Oracle

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.

  • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

  • Magnetic storage (standard): Must be an integer from 10 to 3072.

SQL Server

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2):

    • Enterprise and Standard editions: Must be an integer from 200 to 16384.

    • Web and Express editions: Must be an integer from 20 to 16384.

  • Provisioned IOPS storage (io1):

    • Enterprise and Standard editions: Must be an integer from 200 to 16384.

    • Web and Express editions: Must be an integer from 100 to 16384.

  • Magnetic storage (standard):

    • Enterprise and Standard editions: Must be an integer from 200 to 1024.

    • Web and Express editions: Must be an integer from 20 to 1024.

" + "documentation":"

The amount of storage in gibibytes (GiB) to allocate for the DB instance.

Type: Integer

Amazon Aurora

Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

MySQL

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.

  • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

  • Magnetic storage (standard): Must be an integer from 5 to 3072.

MariaDB

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.

  • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

  • Magnetic storage (standard): Must be an integer from 5 to 3072.

PostgreSQL

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.

  • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

  • Magnetic storage (standard): Must be an integer from 5 to 3072.

Oracle

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.

  • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

  • Magnetic storage (standard): Must be an integer from 10 to 3072.

SQL Server

Constraints to the amount of storage for each storage type are the following:

  • General Purpose (SSD) storage (gp2):

    • Enterprise and Standard editions: Must be an integer from 200 to 16384.

    • Web and Express editions: Must be an integer from 20 to 16384.

  • Provisioned IOPS storage (io1):

    • Enterprise and Standard editions: Must be an integer from 200 to 16384.

    • Web and Express editions: Must be an integer from 100 to 16384.

  • Magnetic storage (standard):

    • Enterprise and Standard editions: Must be an integer from 200 to 1024.

    • Web and Express editions: Must be an integer from 20 to 1024.

" }, "DBInstanceClass":{ "shape":"String", @@ -3716,7 +3716,7 @@ }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" + "documentation":"

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" }, "EnableCustomerOwnedIp":{ "shape":"BooleanOptional", @@ -3855,7 +3855,7 @@ }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" + "documentation":"

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" } } }, @@ -4327,6 +4327,10 @@ "shape":"String", "documentation":"

Specifies the current state of this DB cluster.

" }, + "AutomaticRestartTime":{ + "shape":"TStamp", + "documentation":"

The time when a stopped DB cluster is restarted automatically.

" + }, "PercentProgress":{ "shape":"String", "documentation":"

Specifies the progress of the operation as a percentage.

" @@ -5249,6 +5253,10 @@ "shape":"String", "documentation":"

Specifies the current state of this database.

For information about DB instance statuses, see Viewing DB instance status in the Amazon RDS User Guide.

" }, + "AutomaticRestartTime":{ + "shape":"TStamp", + "documentation":"

The time when a stopped DB instance is restarted automatically.

" + }, "MasterUsername":{ "shape":"String", "documentation":"

Contains the master username for the DB instance.

" @@ -5259,11 +5267,11 @@ }, "Endpoint":{ "shape":"Endpoint", - "documentation":"

Specifies the connection endpoint.

" + "documentation":"

Specifies the connection endpoint.

The endpoint might not be shown for instances whose status is creating.

" }, "AllocatedStorage":{ "shape":"Integer", - "documentation":"

Specifies the allocated storage size specified in gibibytes.

" + "documentation":"

Specifies the allocated storage size specified in gibibytes (GiB).

" }, "InstanceCreateTime":{ "shape":"TStamp", @@ -5471,7 +5479,7 @@ }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

" + "documentation":"

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

" }, "TagList":{"shape":"TagList"}, "DBInstanceAutomatedBackupsReplications":{ @@ -6452,7 +6460,7 @@ }, "SnapshotCreateTime":{ "shape":"TStamp", - "documentation":"

Specifies when the snapshot was taken in Coordinated Universal Time (UTC).

" + "documentation":"

Specifies when the snapshot was taken in Coordinated Universal Time (UTC). Changes for the copy when the snapshot is copied.

" }, "Engine":{ "shape":"String", @@ -6516,7 +6524,7 @@ }, "SourceDBSnapshotIdentifier":{ "shape":"String", - "documentation":"

The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied from. It only has value in case of cross-customer or cross-region copy.

" + "documentation":"

The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied from. It only has a value in the case of a cross-account or cross-Region copy.

" }, "StorageType":{ "shape":"String", @@ -6554,7 +6562,11 @@ "shape":"String", "documentation":"

The identifier for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.

" }, - "TagList":{"shape":"TagList"} + "TagList":{"shape":"TagList"}, + "OriginalSnapshotCreateTime":{ + "shape":"TStamp", + "documentation":"

Specifies the time of the CreateDBSnapshot operation in Coordinated Universal Time (UTC). Doesn't change when the snapshot is copied.

" + } }, "documentation":"

Contains the details of an Amazon RDS DB snapshot.

This data type is used as a response element in the DescribeDBSnapshots action.

", "wrapper":true @@ -7288,7 +7300,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

A filter that specifies one or more DB clusters to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.

" + "documentation":"

A filter that specifies one or more DB clusters to describe.

Supported filters:

  • clone-group-id - Accepts clone group identifiers. The results list will only include information about the DB clusters associated with these clone groups.

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.

  • domain - Accepts Active Directory directory IDs. The results list will only include information about the DB clusters associated with these domains.

  • engine - Accepts engine names. The results list will only include information about the DB clusters for these engines.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -7947,7 +7959,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

A filter that specifies one or more global DB clusters to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.

" + "documentation":"

This parameter isn't currently supported.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -9770,7 +9782,7 @@ }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The new amount of storage (in gibibytes) to allocate for the DB instance.

For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

For the valid values for allocated storage for each engine, see CreateDBInstance.

" + "documentation":"

The new amount of storage in gibibytes (GiB) to allocate for the DB instance.

For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

For the valid values for allocated storage for each engine, see CreateDBInstance.

" }, "DBInstanceClass":{ "shape":"String", @@ -9926,7 +9938,7 @@ }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" + "documentation":"

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" }, "CertificateRotationRestart":{ "shape":"BooleanOptional", @@ -10977,7 +10989,7 @@ }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The allocated storage size for the DB instance specified in gibibytes .

" + "documentation":"

The allocated storage size for the DB instance specified in gibibytes (GiB).

" }, "MasterUserPassword":{ "shape":"String", @@ -12315,7 +12327,7 @@ }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" + "documentation":"

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" } } }, @@ -12452,7 +12464,7 @@ }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" + "documentation":"

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

" }, "SourceDBInstanceAutomatedBackupsArn":{ "shape":"String", @@ -13298,7 +13310,7 @@ }, "StorageSize":{ "shape":"RangeList", - "documentation":"

The valid range of storage in gibibytes. For example, 100 to 16384.

" + "documentation":"

The valid range of storage in gibibytes (GiB). For example, 100 to 16384.

" }, "ProvisionedIops":{ "shape":"RangeList", diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index 794a5cdf9b62..0dbbcc175692 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index 3418df66bd30..4363d0df0189 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshift/src/main/resources/codegen-resources/service-2.json b/services/redshift/src/main/resources/codegen-resources/service-2.json index c9fa01fb73dd..188c1752e117 100644 --- a/services/redshift/src/main/resources/codegen-resources/service-2.json +++ b/services/redshift/src/main/resources/codegen-resources/service-2.json @@ -51,6 +51,23 @@ ], "documentation":"

Adds a partner integration to a cluster. This operation authorizes a partner to push status updates for the specified database. To complete the integration, you also set up the integration on the partner website.

" }, + "AssociateDataShareConsumer":{ + "name":"AssociateDataShareConsumer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDataShareConsumerMessage"}, + "output":{ + "shape":"DataShare", + "resultWrapper":"AssociateDataShareConsumerResult" + }, + "errors":[ + {"shape":"InvalidDataShareFault"}, + {"shape":"InvalidNamespaceFault"} + ], + "documentation":"

From a datashare consumer account, associates a datashare with the account (AssociateEntireAccount) or the specified namespace (ConsumerArn). If you make this association, the consumer can consume the datashare.

" + }, "AuthorizeClusterSecurityGroupIngress":{ "name":"AuthorizeClusterSecurityGroupIngress", "http":{ @@ -68,7 +85,23 @@ {"shape":"AuthorizationAlreadyExistsFault"}, {"shape":"AuthorizationQuotaExceededFault"} ], - "documentation":"

Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an Amazon EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.

If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift cluster must be in the same AWS Region.

If you authorize access to a CIDR/IP address range, specify CIDRIP. For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing.

You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an Amazon EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.

If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift cluster must be in the same Amazon Web Services Region.

If you authorize access to a CIDR/IP address range, specify CIDRIP. For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing.

You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide.

" + }, + "AuthorizeDataShare":{ + "name":"AuthorizeDataShare", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDataShareMessage"}, + "output":{ + "shape":"DataShare", + "resultWrapper":"AuthorizeDataShareResult" + }, + "errors":[ + {"shape":"InvalidDataShareFault"} + ], + "documentation":"

From a data producer account, authorizes the sharing of a datashare with one or more consumer accounts. To authorize a datashare for a data consumer, the producer account must have the correct access privileges.

" }, "AuthorizeEndpointAccess":{ "name":"AuthorizeEndpointAccess", @@ -110,7 +143,7 @@ {"shape":"InvalidClusterSnapshotStateFault"}, {"shape":"LimitExceededFault"} ], - "documentation":"

Authorizes the specified AWS customer account to restore the specified snapshot.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Authorizes the specified Amazon Web Services account to restore the specified snapshot.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

" }, "BatchDeleteClusterSnapshots":{ "name":"BatchDeleteClusterSnapshots", @@ -184,6 +217,24 @@ ], "documentation":"

Copies the specified automated cluster snapshot to a new manual cluster snapshot. The source must be an automated snapshot and it must be in the available state.

When you delete a cluster, Amazon Redshift deletes any automated snapshots of the cluster. Also, when the retention period of the snapshot expires, Amazon Redshift automatically deletes it. If you want to keep an automated snapshot for a longer period, you can make a manual copy of the snapshot. Manual snapshots are retained until you delete them.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

" }, + "CreateAuthenticationProfile":{ + "name":"CreateAuthenticationProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAuthenticationProfileMessage"}, + "output":{ + "shape":"CreateAuthenticationProfileResult", + "resultWrapper":"CreateAuthenticationProfileResult" + }, + "errors":[ + {"shape":"AuthenticationProfileAlreadyExistsFault"}, + {"shape":"AuthenticationProfileQuotaExceededFault"}, + {"shape":"InvalidAuthenticationProfileRequestFault"} + ], + "documentation":"

Creates an authentication profile with the specified parameters.

" + }, "CreateCluster":{ "name":"CreateCluster", "http":{ @@ -353,7 +404,7 @@ {"shape":"TagLimitExceededFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Creates an Amazon Redshift event notification subscription. This action requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the source type, and lists of Amazon Redshift source IDs, event categories, and event severities. Notifications will be sent for all events you want that match those criteria. For example, you can specify source type = cluster, source ID = my-cluster-1 and mycluster2, event categories = Availability, Backup, and severity = ERROR. The subscription will only send notifications for those ERROR events in the Availability and Backup categories for the specified clusters.

If you specify both the source type and source IDs, such as source type = cluster and source identifier = my-cluster-1, notifications will be sent for all the cluster events for my-cluster-1. If you specify a source type but do not specify a source identifier, you will receive notice of the events for the objects of that type in your AWS account. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all Amazon Redshift sources belonging to your AWS account. You must specify a source type if you specify a source ID.

" + "documentation":"

Creates an Amazon Redshift event notification subscription. This action requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the source type, and lists of Amazon Redshift source IDs, event categories, and event severities. Notifications will be sent for all events you want that match those criteria. For example, you can specify source type = cluster, source ID = my-cluster-1 and mycluster2, event categories = Availability, Backup, and severity = ERROR. The subscription will only send notifications for those ERROR events in the Availability and Backup categories for the specified clusters.

If you specify both the source type and source IDs, such as source type = cluster and source identifier = my-cluster-1, notifications will be sent for all the cluster events for my-cluster-1. If you specify a source type but do not specify a source identifier, you will receive notice of the events for the objects of that type in your Amazon Web Services account. If you do not specify either the SourceType or the SourceIdentifier, you will be notified of events generated from all Amazon Redshift sources belonging to your Amazon Web Services account. You must specify a source type if you specify a source ID.

" }, "CreateHsmClientCertificate":{ "name":"CreateHsmClientCertificate", @@ -433,7 +484,7 @@ {"shape":"InvalidTagFault"}, {"shape":"DependentServiceRequestThrottlingFault"} ], - "documentation":"

Creates a snapshot copy grant that permits Amazon Redshift to use a customer master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied snapshots in a destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Creates a snapshot copy grant that permits Amazon Redshift to use a customer master key (CMK) from Key Management Service (KMS) to encrypt copied snapshots in a destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

" }, "CreateSnapshotSchedule":{ "name":"CreateSnapshotSchedule", @@ -493,6 +544,39 @@ ], "documentation":"

Creates a usage limit for a specified Amazon Redshift feature on a cluster. The usage limit is identified by the returned usage limit identifier.

" }, + "DeauthorizeDataShare":{ + "name":"DeauthorizeDataShare", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeauthorizeDataShareMessage"}, + "output":{ + "shape":"DataShare", + "resultWrapper":"DeauthorizeDataShareResult" + }, + "errors":[ + {"shape":"InvalidDataShareFault"} + ], + "documentation":"

From the producer account, removes authorization from the specified datashare.

" + }, + "DeleteAuthenticationProfile":{ + "name":"DeleteAuthenticationProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAuthenticationProfileMessage"}, + "output":{ + "shape":"DeleteAuthenticationProfileResult", + "resultWrapper":"DeleteAuthenticationProfileResult" + }, + "errors":[ + {"shape":"AuthenticationProfileNotFoundFault"}, + {"shape":"InvalidAuthenticationProfileRequestFault"} + ], + "documentation":"

Deletes an authentication profile.

" + }, "DeleteCluster":{ "name":"DeleteCluster", "http":{ @@ -725,6 +809,23 @@ }, "documentation":"

Returns a list of attributes attached to an account.

" }, + "DescribeAuthenticationProfiles":{ + "name":"DescribeAuthenticationProfiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAuthenticationProfilesMessage"}, + "output":{ + "shape":"DescribeAuthenticationProfilesResult", + "resultWrapper":"DescribeAuthenticationProfilesResult" + }, + "errors":[ + {"shape":"AuthenticationProfileNotFoundFault"}, + {"shape":"InvalidAuthenticationProfileRequestFault"} + ], + "documentation":"

Describes an authentication profile.

" + }, "DescribeClusterDbRevisions":{ "name":"DescribeClusterDbRevisions", "http":{ @@ -808,7 +909,7 @@ {"shape":"ClusterSnapshotNotFoundFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Returns one or more snapshot objects, which contain metadata about your cluster snapshots. By default, this operation returns information about all snapshots of all clusters that are owned by you AWS customer account. No information is returned for snapshots owned by inactive AWS customer accounts.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all snapshots that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all snapshots that have any combination of those values are returned. Only snapshots that you own are returned in the response; shared snapshots are not returned with the tag key and tag value request parameters.

If both tag keys and values are omitted from the request, snapshots are returned regardless of whether they have tag keys or values associated with them.

" + "documentation":"

Returns one or more snapshot objects, which contain metadata about your cluster snapshots. By default, this operation returns information about all snapshots of all clusters that are owned by your Amazon Web Services account. No information is returned for snapshots owned by inactive Amazon Web Services accounts.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all snapshots that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all snapshots that have any combination of those values are returned. Only snapshots that you own are returned in the response; shared snapshots are not returned with the tag key and tag value request parameters.

If both tag keys and values are omitted from the request, snapshots are returned regardless of whether they have tag keys or values associated with them.

" }, "DescribeClusterSubnetGroups":{ "name":"DescribeClusterSubnetGroups", @@ -825,7 +926,7 @@ {"shape":"ClusterSubnetGroupNotFoundFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in you AWS account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.

" + "documentation":"

Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in your Amazon Web Services account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.

" }, "DescribeClusterTracks":{ "name":"DescribeClusterTracks", @@ -874,6 +975,54 @@ ], "documentation":"

Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned.

If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.

" }, + "DescribeDataShares":{ + "name":"DescribeDataShares", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataSharesMessage"}, + "output":{ + "shape":"DescribeDataSharesResult", + "resultWrapper":"DescribeDataSharesResult" + }, + "errors":[ + {"shape":"InvalidDataShareFault"} + ], + "documentation":"

Shows the status of any inbound or outbound datashares available in the specified account.

" + }, + "DescribeDataSharesForConsumer":{ + "name":"DescribeDataSharesForConsumer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataSharesForConsumerMessage"}, + "output":{ + "shape":"DescribeDataSharesForConsumerResult", + "resultWrapper":"DescribeDataSharesForConsumerResult" + }, + "errors":[ + {"shape":"InvalidNamespaceFault"} + ], + "documentation":"

Returns a list of datashares where the account identifier being called is a consumer account identifier.

" + }, + "DescribeDataSharesForProducer":{ + "name":"DescribeDataSharesForProducer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataSharesForProducerMessage"}, + "output":{ + "shape":"DescribeDataSharesForProducerResult", + "resultWrapper":"DescribeDataSharesForProducerResult" + }, + "errors":[ + {"shape":"InvalidNamespaceFault"} + ], + "documentation":"

Returns a list of datashares when the account identifier being called is a producer account identifier.

" + }, "DescribeDefaultClusterParameters":{ "name":"DescribeDefaultClusterParameters", "http":{ @@ -980,7 +1129,7 @@ {"shape":"HsmClientCertificateNotFoundFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM client certificates that have any combination of those values are returned.

If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.

" + "documentation":"

Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your Amazon Web Services account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM client certificates that have any combination of those values are returned.

If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.

" }, "DescribeHsmConfigurations":{ "name":"DescribeHsmConfigurations", @@ -997,7 +1146,7 @@ {"shape":"HsmConfigurationNotFoundFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM connections that have any combination of those values are returned.

If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.

" + "documentation":"

Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your Amazon Web Services account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM connections that have any combination of those values are returned.

If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.

" }, "DescribeLoggingStatus":{ "name":"DescribeLoggingStatus", @@ -1045,7 +1194,7 @@ "shape":"OrderableClusterOptionsMessage", "resultWrapper":"DescribeOrderableClusterOptionsResult" }, - "documentation":"

Returns a list of orderable cluster options. Before you create a new cluster you can use this operation to find what options are available, such as the EC2 Availability Zones (AZ) in the specific AWS Region that you can specify, and the node types you can request. The node types differ by available storage, memory, CPU and price. With the cost involved you might want to obtain a list of cluster options in the specific region and specify values when creating a cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Returns a list of orderable cluster options. Before you create a new cluster you can use this operation to find what options are available, such as the EC2 Availability Zones (AZ) in the specific Amazon Web Services Region that you can specify, and the node types you can request. The node types differ by available storage, memory, CPU and price. With the cost involved you might want to obtain a list of cluster options in the specific region and specify values when creating a cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" }, "DescribePartners":{ "name":"DescribePartners", @@ -1148,7 +1297,7 @@ {"shape":"SnapshotCopyGrantNotFoundFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Returns a list of snapshot copy grants owned by the AWS account in the destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Returns a list of snapshot copy grants owned by the Amazon Web Services account in the destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

" }, "DescribeSnapshotSchedules":{ "name":"DescribeSnapshotSchedules", @@ -1260,7 +1409,24 @@ {"shape":"InvalidClusterStateFault"}, {"shape":"UnauthorizedOperation"} ], - "documentation":"

Disables the automatic copying of snapshots from one region to another region for a specified cluster.

If your cluster and its snapshots are encrypted using a customer master key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that grants Amazon Redshift permission to the CMK in the destination region.

" + "documentation":"

Disables the automatic copying of snapshots from one region to another region for a specified cluster.

If your cluster and its snapshots are encrypted using a customer master key (CMK) from Key Management Service, use DeleteSnapshotCopyGrant to delete the grant that grants Amazon Redshift permission to the CMK in the destination region.

" + }, + "DisassociateDataShareConsumer":{ + "name":"DisassociateDataShareConsumer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateDataShareConsumerMessage"}, + "output":{ + "shape":"DataShare", + "resultWrapper":"DisassociateDataShareConsumerResult" + }, + "errors":[ + {"shape":"InvalidDataShareFault"}, + {"shape":"InvalidNamespaceFault"} + ], + "documentation":"

From a consumer account, removes association for the specified datashare.

" }, "EnableLogging":{ "name":"EnableLogging", @@ -1324,7 +1490,7 @@ {"shape":"ClusterNotFoundFault"}, {"shape":"UnsupportedOperationFault"} ], - "documentation":"

Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate is True. You can optionally specify one or more database user groups that the user will join at log on. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Using IAM Authentication to Generate Database User Credentials in the Amazon Redshift Cluster Management Guide.

The AWS Identity and Access Management (IAM)user or role that executes GetClusterCredentials must have an IAM policy attached that allows access to all necessary actions and resources. For more information about permissions, see Resource Policies for GetClusterCredentials in the Amazon Redshift Cluster Management Guide.

If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup action with access to the listed dbgroups.

In addition, if the AutoCreate parameter is set to True, then the policy must include the redshift:CreateClusterUser privilege.

If the DbName parameter is specified, the IAM policy must allow access to the resource dbname for the specified database name.

" + "documentation":"

Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate is True. You can optionally specify one or more database user groups that the user will join at log on. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Using IAM Authentication to Generate Database User Credentials in the Amazon Redshift Cluster Management Guide.

The Identity and Access Management (IAM) user or role that runs GetClusterCredentials must have an IAM policy attached that allows access to all necessary actions and resources. For more information about permissions, see Resource Policies for GetClusterCredentials in the Amazon Redshift Cluster Management Guide.

If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup action with access to the listed dbgroups.

In addition, if the AutoCreate parameter is set to True, then the policy must include the redshift:CreateClusterUser privilege.

If the DbName parameter is specified, the IAM policy must allow access to the resource dbname for the specified database name.

" }, "GetReservedNodeExchangeOfferings":{ "name":"GetReservedNodeExchangeOfferings", @@ -1365,6 +1531,24 @@ ], "documentation":"

Modifies whether a cluster can use AQUA (Advanced Query Accelerator).

" }, + "ModifyAuthenticationProfile":{ + "name":"ModifyAuthenticationProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyAuthenticationProfileMessage"}, + "output":{ + "shape":"ModifyAuthenticationProfileResult", + "resultWrapper":"ModifyAuthenticationProfileResult" + }, + "errors":[ + {"shape":"AuthenticationProfileNotFoundFault"}, + {"shape":"AuthenticationProfileQuotaExceededFault"}, + {"shape":"InvalidAuthenticationProfileRequestFault"} + ], + "documentation":"

Modifies an authentication profile.

" + }, "ModifyCluster":{ "name":"ModifyCluster", "http":{ @@ -1397,7 +1581,7 @@ {"shape":"InvalidClusterTrackFault"}, {"shape":"InvalidRetentionPeriodFault"} ], - "documentation":"

Modifies the settings for a cluster.

You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

You can add another security or parameter group, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Modifies the settings for a cluster.

You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

You can add another security or parameter group, or change the admin user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" }, "ModifyClusterDbRevision":{ "name":"ModifyClusterDbRevision", @@ -1432,7 +1616,7 @@ {"shape":"InvalidClusterStateFault"}, {"shape":"ClusterNotFoundFault"} ], - "documentation":"

Modifies the list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.

A cluster can have up to 10 IAM roles associated at any time.

" + "documentation":"

Modifies the list of Identity and Access Management (IAM) roles that can be used by the cluster to access other Amazon Web Services services.

A cluster can have up to 10 IAM roles associated at any time.

" }, "ModifyClusterMaintenance":{ "name":"ModifyClusterMaintenance", @@ -1466,7 +1650,7 @@ {"shape":"ClusterParameterGroupNotFoundFault"}, {"shape":"InvalidClusterParameterGroupStateFault"} ], - "documentation":"

Modifies the parameters of a parameter group.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Modifies the parameters of a parameter group. For the parameters parameter, it can't contain ASCII characters.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

" }, "ModifyClusterSnapshot":{ "name":"ModifyClusterSnapshot", @@ -1604,7 +1788,7 @@ {"shape":"InvalidClusterStateFault"}, {"shape":"InvalidRetentionPeriodFault"} ], - "documentation":"

Modifies the number of days to retain snapshots in the destination AWS Region after they are copied from the source AWS Region. By default, this operation only changes the retention period of copied automated snapshots. The retention periods for both new and existing copied automated snapshots are updated with the new retention period. You can set the manual option to change only the retention periods of copied manual snapshots. If you set this option, only newly copied manual snapshots have the new retention period.

" + "documentation":"

Modifies the number of days to retain snapshots in the destination Amazon Web Services Region after they are copied from the source Amazon Web Services Region. By default, this operation only changes the retention period of copied automated snapshots. The retention periods for both new and existing copied automated snapshots are updated with the new retention period. You can set the manual option to change only the retention periods of copied manual snapshots. If you set this option, only newly copied manual snapshots have the new retention period.

" }, "ModifySnapshotSchedule":{ "name":"ModifySnapshotSchedule", @@ -1695,6 +1879,22 @@ ], "documentation":"

Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to rebooting. A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster) are applied at this reboot. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" }, + "RejectDataShare":{ + "name":"RejectDataShare", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectDataShareMessage"}, + "output":{ + "shape":"DataShare", + "resultWrapper":"RejectDataShareResult" + }, + "errors":[ + {"shape":"InvalidDataShareFault"} + ], + "documentation":"

From the consumer account, rejects the specified datashare.

" + }, "ResetClusterParameterGroup":{ "name":"ResetClusterParameterGroup", "http":{ @@ -1872,7 +2072,7 @@ {"shape":"AuthorizationNotFoundFault"}, {"shape":"ClusterSnapshotNotFoundFault"} ], - "documentation":"

Removes the ability of the specified AWS customer account to restore the specified snapshot. If the account is currently restoring the snapshot, the restore will run to completion.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Removes the ability of the specified Amazon Web Services account to restore the specified snapshot. If the account is currently restoring the snapshot, the restore will run to completion.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

" }, "RotateEncryptionKey":{ "name":"RotateEncryptionKey", @@ -1990,14 +2190,14 @@ "members":{ "AccountId":{ "shape":"String", - "documentation":"

The identifier of an AWS customer account authorized to restore a snapshot.

" + "documentation":"

The identifier of an Amazon Web Services account authorized to restore a snapshot.

" }, "AccountAlias":{ "shape":"String", - "documentation":"

The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is amazon-redshift-support.

" + "documentation":"

The identifier of an Amazon Web Services support account authorized to restore a snapshot. For Amazon Web Services Support, the identifier is amazon-redshift-support.

" } }, - "documentation":"

Describes an AWS customer account authorized to restore a snapshot.

" + "documentation":"

Describes an Amazon Web Services account authorized to restore a snapshot.

" }, "AccountsWithRestoreAccessList":{ "type":"list", @@ -2023,7 +2223,7 @@ }, "AquaConfigurationStatus":{ "shape":"AquaConfigurationStatus", - "documentation":"

The value represents how the cluster is configured to use AQUA. Possible values include the following.

  • enabled - Use AQUA if it is available for the current AWS Region and Amazon Redshift node type.

  • disabled - Don't use AQUA.

  • auto - Amazon Redshift determines whether to use AQUA.

" + "documentation":"

The value represents how the cluster is configured to use AQUA. Possible values include the following.

  • enabled - Use AQUA if it is available for the current Amazon Web Services Region and Amazon Redshift node type.

  • disabled - Don't use AQUA.

  • auto - Amazon Redshift determines whether to use AQUA.

" } }, "documentation":"

The AQUA (Advanced Query Accelerator) configuration of the cluster.

" @@ -2044,6 +2244,24 @@ "applying" ] }, + "AssociateDataShareConsumerMessage":{ + "type":"structure", + "required":["DataShareArn"], + "members":{ + "DataShareArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the datashare that the consumer is to use with the account or the namespace.

" + }, + "AssociateEntireAccount":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies whether the datashare is associated with the entire account.

" + }, + "ConsumerArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the consumer that is associated with the datashare.

" + } + } + }, "AssociatedClusterList":{ "type":"list", "member":{ @@ -2082,6 +2300,65 @@ }, "documentation":"

Describes an attribute value.

" }, + "AuthenticationProfile":{ + "type":"structure", + "members":{ + "AuthenticationProfileName":{ + "shape":"AuthenticationProfileNameString", + "documentation":"

The name of the authentication profile.

" + }, + "AuthenticationProfileContent":{ + "shape":"String", + "documentation":"

The content of the authentication profile in JSON format. The maximum length of the JSON string is determined by a quota for your account.

" + } + }, + "documentation":"

Describes an authentication profile.

" + }, + "AuthenticationProfileAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The authentication profile already exists.

", + "error":{ + "code":"AuthenticationProfileAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthenticationProfileList":{ + "type":"list", + "member":{"shape":"AuthenticationProfile"} + }, + "AuthenticationProfileNameString":{ + "type":"string", + "max":63, + "pattern":"^[a-zA-Z0-9\\-]+$" + }, + "AuthenticationProfileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The authentication profile can't be found.

", + "error":{ + "code":"AuthenticationProfileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthenticationProfileQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The size or number of authentication profiles has exceeded the quota. The maximum length of the JSON string and maximum number of authentication profiles is determined by a quota for your account.

", + "error":{ + "code":"AuthenticationProfileQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "AuthorizationAlreadyExistsFault":{ "type":"structure", "members":{ @@ -2143,7 +2420,7 @@ }, "EC2SecurityGroupOwnerId":{ "shape":"String", - "documentation":"

The AWS account number of the owner of the security group specified by the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value.

Example: 111122223333

" + "documentation":"

The Amazon Web Services account number of the owner of the security group specified by the EC2SecurityGroupName parameter. The Amazon Web Services Access Key ID is not an acceptable value.

Example: 111122223333

" } }, "documentation":"

" @@ -2154,6 +2431,23 @@ "ClusterSecurityGroup":{"shape":"ClusterSecurityGroup"} } }, + "AuthorizeDataShareMessage":{ + "type":"structure", + "required":[ + "DataShareArn", + "ConsumerIdentifier" + ], + "members":{ + "DataShareArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the datashare that producers are to authorize sharing for.

" + }, + "ConsumerIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the data consumer that is authorized to access the datashare. This identifier is an Amazon Web Services account ID.

" + } + } + }, "AuthorizeEndpointAccessMessage":{ "type":"structure", "required":["Account"], @@ -2164,7 +2458,7 @@ }, "Account":{ "shape":"String", - "documentation":"

The AWS account ID to grant access to.

" + "documentation":"

The Amazon Web Services account ID to grant access to.

" }, "VpcIds":{ "shape":"VpcIdentifierList", @@ -2189,7 +2483,7 @@ }, "AccountWithRestoreAccess":{ "shape":"String", - "documentation":"

The identifier of the AWS customer account authorized to restore the specified snapshot.

To share a snapshot with AWS support, specify amazon-redshift-support.

" + "documentation":"

The identifier of the Amazon Web Services account authorized to restore the specified snapshot.

To share a snapshot with Amazon Web Services Support, specify amazon-redshift-support.

" } }, "documentation":"

" @@ -2363,7 +2657,7 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.

" + "documentation":"

The admin user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.

" }, "DBName":{ "shape":"String", @@ -2475,7 +2769,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.

" + "documentation":"

The Key Management Service (KMS) key ID of the encryption key used to encrypt data in the cluster.

" }, "EnhancedVpcRouting":{ "shape":"Boolean", @@ -2483,7 +2777,7 @@ }, "IamRoles":{ "shape":"ClusterIamRoleList", - "documentation":"

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.

" + "documentation":"

A list of Identity and Access Management (IAM) roles that can be used by the cluster to access other Amazon Web Services services.

" }, "PendingActions":{ "shape":"PendingActionsList", @@ -2643,7 +2937,7 @@ "documentation":"

A value that describes the status of the IAM role's association with an Amazon Redshift cluster.

The following are possible statuses and descriptions.

  • in-sync: The role is available for use by the cluster.

  • adding: The role is in the process of being associated with the cluster.

  • removing: The role is in the process of being disassociated with the cluster.

" } }, - "documentation":"

An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.

" + "documentation":"

An Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other Amazon Web Services services.

" }, "ClusterIamRoleList":{ "type":"list", @@ -3205,7 +3499,7 @@ }, "TargetSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier given to the new manual snapshot.

Constraints:

  • Cannot be null, empty, or blank.

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for the AWS account that is making the request.

" + "documentation":"

The identifier given to the new manual snapshot.

Constraints:

  • Cannot be null, empty, or blank.

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for the Amazon Web Services account that is making the request.

" }, "ManualSnapshotRetentionPeriod":{ "shape":"IntegerOptional", @@ -3232,6 +3526,36 @@ }, "exception":true }, + "CreateAuthenticationProfileMessage":{ + "type":"structure", + "required":[ + "AuthenticationProfileName", + "AuthenticationProfileContent" + ], + "members":{ + "AuthenticationProfileName":{ + "shape":"AuthenticationProfileNameString", + "documentation":"

The name of the authentication profile to be created.

" + }, + "AuthenticationProfileContent":{ + "shape":"String", + "documentation":"

The content of the authentication profile in JSON format. The maximum length of the JSON string is determined by a quota for your account.

" + } + } + }, + "CreateAuthenticationProfileResult":{ + "type":"structure", + "members":{ + "AuthenticationProfileName":{ + "shape":"AuthenticationProfileNameString", + "documentation":"

The name of the authentication profile that was created.

" + }, + "AuthenticationProfileContent":{ + "shape":"String", + "documentation":"

The content of the authentication profile in JSON format.

" + } + } + }, "CreateClusterMessage":{ "type":"structure", "required":[ @@ -3247,7 +3571,7 @@ }, "ClusterIdentifier":{ "shape":"String", - "documentation":"

A unique identifier for the cluster. You use this identifier to refer to the cluster for any subsequent cluster operations such as deleting or modifying. The identifier also appears in the Amazon Redshift console.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an AWS account.

Example: myexamplecluster

" + "documentation":"

A unique identifier for the cluster. You use this identifier to refer to the cluster for any subsequent cluster operations such as deleting or modifying. The identifier also appears in the Amazon Redshift console.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an Amazon Web Services account.

Example: myexamplecluster

" }, "ClusterType":{ "shape":"String", @@ -3259,11 +3583,11 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

The user name associated with the master user account for the cluster that is being created.

Constraints:

  • Must be 1 - 128 alphanumeric characters. The user name can't be PUBLIC.

  • First character must be a letter.

  • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

" + "documentation":"

The user name associated with the admin user account for the cluster that is being created.

Constraints:

  • Must be 1 - 128 alphanumeric characters. The user name can't be PUBLIC.

  • First character must be a letter.

  • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

" }, "MasterUserPassword":{ "shape":"String", - "documentation":"

The password associated with the master user account for the cluster that is being created.

Constraints:

  • Must be between 8 and 64 characters in length.

  • Must contain at least one uppercase letter.

  • Must contain at least one lowercase letter.

  • Must contain one number.

  • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), \" (double quote), \\, /, @, or space.

" + "documentation":"

The password associated with the admin user account for the cluster that is being created.

Constraints:

  • Must be between 8 and 64 characters in length.

  • Must contain at least one uppercase letter.

  • Must contain at least one lowercase letter.

  • Must contain one number.

  • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), \" (double quote), \\, /, @, or space.

" }, "ClusterSecurityGroups":{ "shape":"ClusterSecurityGroupNameList", @@ -3339,7 +3663,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

" + "documentation":"

The Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

" }, "EnhancedVpcRouting":{ "shape":"BooleanOptional", @@ -3351,7 +3675,7 @@ }, "IamRoles":{ "shape":"IamRoleArnList", - "documentation":"

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.

A cluster can have up to 10 IAM roles associated with it at any time.

" + "documentation":"

A list of Identity and Access Management (IAM) roles that can be used by the cluster to access other Amazon Web Services services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.

A cluster can have up to 10 IAM roles associated with it at any time.

" }, "MaintenanceTrackName":{ "shape":"String", @@ -3367,7 +3691,7 @@ }, "AquaConfigurationStatus":{ "shape":"AquaConfigurationStatus", - "documentation":"

The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) when it is created. Possible values include the following.

  • enabled - Use AQUA if it is available for the current AWS Region and Amazon Redshift node type.

  • disabled - Don't use AQUA.

  • auto - Amazon Redshift determines whether to use AQUA.

" + "documentation":"

The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) when it is created. Possible values include the following.

  • enabled - Use AQUA if it is available for the current Amazon Web Services Region and Amazon Redshift node type.

  • disabled - Don't use AQUA.

  • auto - Amazon Redshift determines whether to use AQUA.

" } }, "documentation":"

" @@ -3382,11 +3706,11 @@ "members":{ "ParameterGroupName":{ "shape":"String", - "documentation":"

The name of the cluster parameter group.

Constraints:

  • Must be 1 to 255 alphanumeric characters or hyphens

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique withing your AWS account.

This value is stored as a lower-case string.

" + "documentation":"

The name of the cluster parameter group.

Constraints:

  • Must be 1 to 255 alphanumeric characters or hyphens

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique within your Amazon Web Services account.

This value is stored as a lower-case string.

" }, "ParameterGroupFamily":{ "shape":"String", - "documentation":"

The Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters.

To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups. By default, Amazon Redshift returns a list of all the parameter groups that are owned by your AWS account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is \"redshift-1.0\".

" + "documentation":"

The Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters.

To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups. By default, Amazon Redshift returns a list of all the parameter groups that are owned by your Amazon Web Services account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is \"redshift-1.0\".

" }, "Description":{ "shape":"String", @@ -3420,7 +3744,7 @@ "members":{ "ClusterSecurityGroupName":{ "shape":"String", - "documentation":"

The name for the security group. Amazon Redshift stores the value as a lowercase string.

Constraints:

  • Must contain no more than 255 alphanumeric characters or hyphens.

  • Must not be \"Default\".

  • Must be unique for all security groups that are created by your AWS account.

Example: examplesecuritygroup

" + "documentation":"

The name for the security group. Amazon Redshift stores the value as a lowercase string.

Constraints:

  • Must contain no more than 255 alphanumeric characters or hyphens.

  • Must not be \"Default\".

  • Must be unique for all security groups that are created by your Amazon Web Services account.

Example: examplesecuritygroup

" }, "Description":{ "shape":"String", @@ -3448,7 +3772,7 @@ "members":{ "SnapshotIdentifier":{ "shape":"String", - "documentation":"

A unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the AWS account.

Constraints:

  • Cannot be null, empty, or blank

  • Must contain from 1 to 255 alphanumeric characters or hyphens

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

Example: my-snapshot-id

" + "documentation":"

A unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the Amazon Web Services account.

Constraints:

  • Cannot be null, empty, or blank

  • Must contain from 1 to 255 alphanumeric characters or hyphens

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

Example: my-snapshot-id

" }, "ClusterIdentifier":{ "shape":"String", @@ -3481,7 +3805,7 @@ "members":{ "ClusterSubnetGroupName":{ "shape":"String", - "documentation":"

The name for the subnet group. Amazon Redshift stores the value as a lowercase string.

Constraints:

  • Must contain no more than 255 alphanumeric characters or hyphens.

  • Must not be \"Default\".

  • Must be unique for all subnet groups that are created by your AWS account.

Example: examplesubnetgroup

" + "documentation":"

The name for the subnet group. Amazon Redshift stores the value as a lowercase string.

Constraints:

  • Must contain no more than 255 alphanumeric characters or hyphens.

  • Must not be \"Default\".

  • Must be unique for all subnet groups that are created by your Amazon Web Services account.

Example: examplesubnetgroup

" }, "Description":{ "shape":"String", @@ -3517,7 +3841,7 @@ }, "ResourceOwner":{ "shape":"String", - "documentation":"

The AWS account ID of the owner of the cluster. This is only required if the cluster is in another AWS account.

" + "documentation":"

The Amazon Web Services account ID of the owner of the cluster. This is only required if the cluster is in another Amazon Web Services account.

" }, "EndpointName":{ "shape":"String", @@ -3550,7 +3874,7 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.

" + "documentation":"

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your Amazon Web Services account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.

" }, "SourceIds":{ "shape":"SourceIdsList", @@ -3558,7 +3882,7 @@ }, "EventCategories":{ "shape":"EventCategoriesList", - "documentation":"

Specifies the Amazon Redshift event categories to be published by the event notification subscription.

Values: configuration, management, monitoring, security

" + "documentation":"

Specifies the Amazon Redshift event categories to be published by the event notification subscription.

Values: configuration, management, monitoring, security, pending

" }, "Severity":{ "shape":"String", @@ -3699,7 +4023,7 @@ "members":{ "SnapshotCopyGrantName":{ "shape":"String", - "documentation":"

The name of the snapshot copy grant. This name must be unique in the region for the AWS account.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an AWS account.

" + "documentation":"

The name of the snapshot copy grant. This name must be unique in the region for the Amazon Web Services account.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an Amazon Web Services account.

" }, "KmsKeyId":{ "shape":"String", @@ -3817,6 +4141,85 @@ } } }, + "DataShare":{ + "type":"structure", + "members":{ + "DataShareArn":{ + "shape":"String", + "documentation":"

An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name} format.

" + }, + "ProducerArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the producer.

" + }, + "AllowPubliclyAccessibleConsumers":{ + "shape":"Boolean", + "documentation":"

A value that specifies whether the datashare can be shared to a publicly accessible cluster.

" + }, + "DataShareAssociations":{ + "shape":"DataShareAssociationList", + "documentation":"

A value that specifies when the datashare has an association between a producer and data consumers.

" + } + } + }, + "DataShareAssociation":{ + "type":"structure", + "members":{ + "ConsumerIdentifier":{ + "shape":"String", + "documentation":"

The name of the consumer accounts that have an association with a producer datashare.

" + }, + "Status":{ + "shape":"DataShareStatus", + "documentation":"

The status of the datashare that is associated.

" + }, + "CreatedDate":{ + "shape":"TStamp", + "documentation":"

The creation date of the datashare that is associated.

" + }, + "StatusChangeDate":{ + "shape":"TStamp", + "documentation":"

The status change date of the datashare that is associated.

" + } + }, + "documentation":"

The association of a datashare from a producer account with a data consumer.

" + }, + "DataShareAssociationList":{ + "type":"list", + "member":{"shape":"DataShareAssociation"} + }, + "DataShareList":{ + "type":"list", + "member":{"shape":"DataShare"} + }, + "DataShareStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "PENDING_AUTHORIZATION", + "AUTHORIZED", + "DEAUTHORIZED", + "REJECTED", + "AVAILABLE" + ] + }, + "DataShareStatusForConsumer":{ + "type":"string", + "enum":[ + "ACTIVE", + "AVAILABLE" + ] + }, + "DataShareStatusForProducer":{ + "type":"string", + "enum":[ + "ACTIVE", + "AUTHORIZED", + "PENDING_AUTHORIZATION", + "DEAUTHORIZED", + "REJECTED" + ] + }, "DataTransferProgress":{ "type":"structure", "members":{ @@ -3854,6 +4257,23 @@ "locationName":"DbGroup" } }, + "DeauthorizeDataShareMessage":{ + "type":"structure", + "required":[ + "DataShareArn", + "ConsumerIdentifier" + ], + "members":{ + "DataShareArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the datashare to remove authorization from.

" + }, + "ConsumerIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the data consumer that is to have authorization removed from the datashare. This identifier is an Amazon Web Services account ID.

" + } + } + }, "DefaultClusterParameters":{ "type":"structure", "members":{ @@ -3898,6 +4318,25 @@ "locationName":"DeferredMaintenanceWindow" } }, + "DeleteAuthenticationProfileMessage":{ + "type":"structure", + "required":["AuthenticationProfileName"], + "members":{ + "AuthenticationProfileName":{ + "shape":"AuthenticationProfileNameString", + "documentation":"

The name of the authentication profile to delete.

" + } + } + }, + "DeleteAuthenticationProfileResult":{ + "type":"structure", + "members":{ + "AuthenticationProfileName":{ + "shape":"AuthenticationProfileNameString", + "documentation":"

The name of the authentication profile that was deleted.

" + } + } + }, "DeleteClusterMessage":{ "type":"structure", "required":["ClusterIdentifier"], @@ -4123,6 +4562,24 @@ } } }, + "DescribeAuthenticationProfilesMessage":{ + "type":"structure", + "members":{ + "AuthenticationProfileName":{ + "shape":"AuthenticationProfileNameString", + "documentation":"

The name of the authentication profile to describe. If not specified then all authentication profiles owned by the account are listed.

" + } + } + }, + "DescribeAuthenticationProfilesResult":{ + "type":"structure", + "members":{ + "AuthenticationProfiles":{ + "shape":"AuthenticationProfileList", + "documentation":"

The list of authentication profiles.

" + } + } + }, "DescribeClusterDbRevisionsMessage":{ "type":"structure", "members":{ @@ -4153,7 +4610,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameterGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameterGroups request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4184,7 +4641,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameters request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" } }, "documentation":"

" @@ -4202,7 +4659,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSecurityGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the ClusterSecurityGroupName parameter or the Marker parameter, but not both.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSecurityGroups request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the ClusterSecurityGroupName parameter or the Marker parameter, but not both.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4244,11 +4701,11 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSnapshots request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSnapshots request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "OwnerAccount":{ "shape":"String", - "documentation":"

The AWS customer account used to create or copy the snapshot. Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your AWS customer account, or do not specify the parameter.

" + "documentation":"

The Amazon Web Services account used to create or copy the snapshot. Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your Amazon Web Services account, or do not specify the parameter.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4282,7 +4739,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSubnetGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSubnetGroups request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4329,7 +4786,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterVersions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterVersions request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" } }, "documentation":"

" @@ -4347,7 +4804,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the ClusterIdentifier parameter or the Marker parameter, but not both.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusters request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the ClusterIdentifier parameter or the Marker parameter, but not both.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4360,6 +4817,104 @@ }, "documentation":"

" }, + "DescribeDataSharesForConsumerMessage":{ + "type":"structure", + "members":{ + "ConsumerArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the consumer that is returned in the list of datashares.

" + }, + "Status":{ + "shape":"DataShareStatusForConsumer", + "documentation":"

An identifier giving the status of a datashare in the consumer cluster. If this field is specified, Amazon Redshift returns the list of datashares that have the specified status.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDataSharesForConsumer request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + } + } + }, + "DescribeDataSharesForConsumerResult":{ + "type":"structure", + "members":{ + "DataShares":{ + "shape":"DataShareList", + "documentation":"

Shows the results of datashares available for consumers.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDataSharesForConsumer request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + } + } + }, + "DescribeDataSharesForProducerMessage":{ + "type":"structure", + "members":{ + "ProducerArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the producer that is returned in the list of datashares.

" + }, + "Status":{ + "shape":"DataShareStatusForProducer", + "documentation":"

An identifier giving the status of a datashare in the producer. If this field is specified, Amazon Redshift returns the list of datashares that have the specified status.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDataSharesForProducer request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + } + } + }, + "DescribeDataSharesForProducerResult":{ + "type":"structure", + "members":{ + "DataShares":{ + "shape":"DataShareList", + "documentation":"

Shows the results of datashares available for producers.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDataSharesForProducer request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + } + } + }, + "DescribeDataSharesMessage":{ + "type":"structure", + "members":{ + "DataShareArn":{ + "shape":"String", + "documentation":"

The identifier of the datashare to describe details of.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDataShares request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + } + } + }, + "DescribeDataSharesResult":{ + "type":"structure", + "members":{ + "DataShares":{ + "shape":"DataShareList", + "documentation":"

The results returned from describing datashares.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDataShares request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + } + } + }, "DescribeDefaultClusterParametersMessage":{ "type":"structure", "required":["ParameterGroupFamily"], @@ -4374,7 +4929,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDefaultClusterParameters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDefaultClusterParameters request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" } }, "documentation":"

" @@ -4394,7 +4949,7 @@ }, "ResourceOwner":{ "shape":"String", - "documentation":"

The AWS account ID of the owner of the cluster.

" + "documentation":"

The Amazon Web Services account ID of the owner of the cluster.

" }, "EndpointName":{ "shape":"String", @@ -4423,7 +4978,7 @@ }, "Account":{ "shape":"String", - "documentation":"

The AWS account ID of either the cluster owner (grantor) or grantee. If Grantee parameter is true, then the Account value is of the grantor.

" + "documentation":"

The Amazon Web Services account ID of either the cluster owner (grantor) or grantee. If Grantee parameter is true, then the Account value is of the grantor.

" }, "Grantee":{ "shape":"BooleanOptional", @@ -4462,7 +5017,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEventSubscriptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEventSubscriptions request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4504,7 +5059,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEvents request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEvents request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" } }, "documentation":"

" @@ -4514,7 +5069,7 @@ "members":{ "HsmClientCertificateIdentifier":{ "shape":"String", - "documentation":"

The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your AWS customer account.

" + "documentation":"

The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your Amazon Web Services account.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4522,7 +5077,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmClientCertificates request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmClientCertificates request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4540,7 +5095,7 @@ "members":{ "HsmConfigurationIdentifier":{ "shape":"String", - "documentation":"

The identifier of a specific Amazon Redshift HSM configuration to be described. If no identifier is specified, information is returned for all HSM configurations owned by your AWS customer account.

" + "documentation":"

The identifier of a specific Amazon Redshift HSM configuration to be described. If no identifier is specified, information is returned for all HSM configurations owned by your Amazon Web Services account.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4548,7 +5103,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmConfigurations request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmConfigurations request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4590,7 +5145,7 @@ }, "OwnerAccount":{ "shape":"String", - "documentation":"

The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

" + "documentation":"

The Amazon Web Services account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

" }, "Filters":{ "shape":"NodeConfigurationOptionsFilterList", @@ -4599,7 +5154,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeNodeConfigurationOptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeNodeConfigurationOptions request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4624,7 +5179,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeOrderableClusterOptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeOrderableClusterOptions request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" } }, "documentation":"

" @@ -4638,7 +5193,7 @@ "members":{ "AccountId":{ "shape":"PartnerIntegrationAccountId", - "documentation":"

The AWS account ID that owns the cluster.

" + "documentation":"

The Amazon Web Services account ID that owns the cluster.

" }, "ClusterIdentifier":{ "shape":"PartnerIntegrationClusterIdentifier", @@ -4676,7 +5231,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodeOfferings request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodeOfferings request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" } }, "documentation":"

" @@ -4694,7 +5249,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodes request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodes request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" } }, "documentation":"

" @@ -4705,7 +5260,7 @@ "members":{ "ClusterIdentifier":{ "shape":"String", - "documentation":"

The unique identifier of a cluster whose resize progress you are requesting. This parameter is case-sensitive.

By default, resize operations for all clusters defined for an AWS account are returned.

" + "documentation":"

The unique identifier of a cluster whose resize progress you are requesting. This parameter is case-sensitive.

By default, resize operations for all clusters defined for an Amazon Web Services account are returned.

" } }, "documentation":"

" @@ -4739,7 +5294,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeScheduledActions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeScheduledActions request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4760,7 +5315,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrant request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrant request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4888,7 +5443,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeUsageLimits request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeUsageLimits request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4928,6 +5483,24 @@ "Cluster":{"shape":"Cluster"} } }, + "DisassociateDataShareConsumerMessage":{ + "type":"structure", + "required":["DataShareArn"], + "members":{ + "DataShareArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the datashare to remove association for.

" + }, + "DisassociateEntireAccount":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies whether association for the datashare is removed from the entire account.

" + }, + "ConsumerArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the consumer that association for the datashare is removed from.

" + } + } + }, "Double":{"type":"double"}, "DoubleOptional":{"type":"double"}, "EC2SecurityGroup":{ @@ -4943,7 +5516,7 @@ }, "EC2SecurityGroupOwnerId":{ "shape":"String", - "documentation":"

The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

" + "documentation":"

The Amazon Web Services account ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

" }, "Tags":{ "shape":"TagList", @@ -5015,7 +5588,7 @@ }, "DestinationRegion":{ "shape":"String", - "documentation":"

The destination AWS Region that you want to copy snapshots to.

Constraints: Must be the name of a valid AWS Region. For more information, see Regions and Endpoints in the Amazon Web Services General Reference.

" + "documentation":"

The destination Amazon Web Services Region that you want to copy snapshots to.

Constraints: Must be the name of a valid Amazon Web Services Region. For more information, see Regions and Endpoints in the Amazon Web Services General Reference.

" }, "RetentionPeriod":{ "shape":"IntegerOptional", @@ -5023,11 +5596,11 @@ }, "SnapshotCopyGrantName":{ "shape":"String", - "documentation":"

The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region.

" + "documentation":"

The name of the snapshot copy grant to use when snapshots of an Amazon Web Services KMS-encrypted cluster are copied to the destination region.

" }, "ManualSnapshotRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days to retain newly copied snapshots in the destination AWS Region after they are copied from the source AWS Region. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

" + "documentation":"

The number of days to retain newly copied snapshots in the destination Amazon Web Services Region after they are copied from the source Amazon Web Services Region. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

" } }, "documentation":"

" @@ -5065,7 +5638,7 @@ }, "ResourceOwner":{ "shape":"String", - "documentation":"

The AWS account ID of the owner of the cluster.

" + "documentation":"

The Amazon Web Services account ID of the owner of the cluster.

" }, "SubnetGroupName":{ "shape":"String", @@ -5133,11 +5706,11 @@ "members":{ "Grantor":{ "shape":"String", - "documentation":"

The AWS account ID of the cluster owner.

" + "documentation":"

The Amazon Web Services account ID of the cluster owner.

" }, "Grantee":{ "shape":"String", - "documentation":"

The AWS account ID of the grantee of the cluster.

" + "documentation":"

The Amazon Web Services account ID of the grantee of the cluster.

" }, "ClusterIdentifier":{ "shape":"String", @@ -5168,7 +5741,7 @@ "documentation":"

The number of Redshift-managed VPC endpoints created for the authorization.

" } }, - "documentation":"

Describes an endpoint authorization for authorizing Redshift-managed VPC endpoint access to a cluster across AWS accounts.

" + "documentation":"

Describes an endpoint authorization for authorizing Redshift-managed VPC endpoint access to a cluster across Amazon Web Services accounts.

" }, "EndpointAuthorizationAlreadyExistsFault":{ "type":"structure", @@ -5276,7 +5849,7 @@ }, "EventCategories":{ "shape":"EventCategoriesList", - "documentation":"

A list of the event categories.

Values: Configuration, Management, Monitoring, Security

" + "documentation":"

A list of the event categories.

Values: Configuration, Management, Monitoring, Security, Pending

" }, "Severity":{ "shape":"String", @@ -5374,7 +5947,7 @@ "members":{ "CustomerAwsId":{ "shape":"String", - "documentation":"

The AWS customer account associated with the Amazon Redshift event notification subscription.

" + "documentation":"

The Amazon Web Services account associated with the Amazon Redshift event notification subscription.

" }, "CustSubscriptionId":{ "shape":"String", @@ -5402,7 +5975,7 @@ }, "EventCategoriesList":{ "shape":"EventCategoriesList", - "documentation":"

The list of Amazon Redshift event categories specified in the event notification subscription.

Values: Configuration, Management, Monitoring, Security

" + "documentation":"

The list of Amazon Redshift event categories specified in the event notification subscription.

Values: Configuration, Management, Monitoring, Security, Pending

" }, "Severity":{ "shape":"String", @@ -5805,6 +6378,18 @@ }, "Integer":{"type":"integer"}, "IntegerOptional":{"type":"integer"}, + "InvalidAuthenticationProfileRequestFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The authentication profile request is not valid. The profile name can't be null or empty. The authentication profile API operation must be available in the Amazon Web Services Region.

", + "error":{ + "code":"InvalidAuthenticationProfileRequestFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidAuthorizationStateFault":{ "type":"structure", "members":{ @@ -5913,6 +6498,18 @@ }, "exception":true }, + "InvalidDataShareFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

There is an error with the datashare.

", + "error":{ + "code":"InvalidDataShareFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidElasticIpFault":{ "type":"structure", "members":{ @@ -5961,6 +6558,18 @@ }, "exception":true }, + "InvalidNamespaceFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The namespace isn't valid because the namespace doesn't exist. Provide a valid namespace.

", + "error":{ + "code":"InvalidNamespaceFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidReservedNodeStateFault":{ "type":"structure", "members":{ @@ -6133,7 +6742,7 @@ "type":"structure", "members":{ }, - "documentation":"

The encryption key has exceeded its grant limit in AWS KMS.

", + "documentation":"

The encryption key has exceeded its grant limit in Amazon Web Services KMS.

", "error":{ "code":"LimitExceededFault", "httpStatusCode":400, @@ -6208,7 +6817,7 @@ }, "AquaConfigurationStatus":{ "shape":"AquaConfigurationStatus", - "documentation":"

The new value of AQUA configuration status. Possible values include the following.

  • enabled - Use AQUA if it is available for the current AWS Region and Amazon Redshift node type.

  • disabled - Don't use AQUA.

  • auto - Amazon Redshift determines whether to use AQUA.

" + "documentation":"

The new value of AQUA configuration status. Possible values include the following.

  • enabled - Use AQUA if it is available for the current Amazon Web Services Region and Amazon Redshift node type.

  • disabled - Don't use AQUA.

  • auto - Amazon Redshift determines whether to use AQUA.

" } } }, @@ -6221,6 +6830,36 @@ } } }, + "ModifyAuthenticationProfileMessage":{ + "type":"structure", + "required":[ + "AuthenticationProfileName", + "AuthenticationProfileContent" + ], + "members":{ + "AuthenticationProfileName":{ + "shape":"AuthenticationProfileNameString", + "documentation":"

The name of the authentication profile to replace.

" + }, + "AuthenticationProfileContent":{ + "shape":"String", + "documentation":"

The new content of the authentication profile in JSON format. The maximum length of the JSON string is determined by a quota for your account.

" + } + } + }, + "ModifyAuthenticationProfileResult":{ + "type":"structure", + "members":{ + "AuthenticationProfileName":{ + "shape":"AuthenticationProfileNameString", + "documentation":"

The name of the authentication profile that was replaced.

" + }, + "AuthenticationProfileContent":{ + "shape":"String", + "documentation":"

The updated content of the authentication profile in JSON format.

" + } + } + }, "ModifyClusterDbRevisionMessage":{ "type":"structure", "required":[ @@ -6335,7 +6974,7 @@ }, "MasterUserPassword":{ "shape":"String", - "documentation":"

The new password for the cluster master user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

Operations never return the password, so this operation provides a way to regain access to the master user account for a cluster if the password is lost.

Default: Uses existing setting.

Constraints:

  • Must be between 8 and 64 characters in length.

  • Must contain at least one uppercase letter.

  • Must contain at least one lowercase letter.

  • Must contain one number.

  • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), \" (double quote), \\, /, @, or space.

" + "documentation":"

The new password for the cluster admin user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

Operations never return the password, so this operation provides a way to regain access to the admin user account for a cluster if the password is lost.

Default: Uses existing setting.

Constraints:

  • Must be between 8 and 64 characters in length.

  • Must contain at least one uppercase letter.

  • Must contain at least one lowercase letter.

  • Must contain one number.

  • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), \" (double quote), \\, /, @, or space.

" }, "ClusterParameterGroupName":{ "shape":"String", @@ -6371,7 +7010,7 @@ }, "NewClusterIdentifier":{ "shape":"String", - "documentation":"

The new identifier for the cluster.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an AWS account.

Example: examplecluster

" + "documentation":"

The new identifier for the cluster.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an Amazon Web Services account.

Example: examplecluster

" }, "PubliclyAccessible":{ "shape":"BooleanOptional", @@ -6395,7 +7034,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

" + "documentation":"

The Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

" }, "AvailabilityZoneRelocation":{ "shape":"BooleanOptional", @@ -6534,7 +7173,7 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.

" + "documentation":"

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your Amazon Web Services account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.

" }, "SourceIds":{ "shape":"SourceIdsList", @@ -6542,7 +7181,7 @@ }, "EventCategories":{ "shape":"EventCategoriesList", - "documentation":"

Specifies the Amazon Redshift event categories to be published by the event notification subscription.

Values: configuration, management, monitoring, security

" + "documentation":"

Specifies the Amazon Redshift event categories to be published by the event notification subscription.

Values: configuration, management, monitoring, security, pending

" }, "Severity":{ "shape":"String", @@ -6608,11 +7247,11 @@ "members":{ "ClusterIdentifier":{ "shape":"String", - "documentation":"

The unique identifier of the cluster for which you want to change the retention period for either automated or manual snapshots that are copied to a destination AWS Region.

Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

" + "documentation":"

The unique identifier of the cluster for which you want to change the retention period for either automated or manual snapshots that are copied to a destination Amazon Web Services Region.

Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

" }, "RetentionPeriod":{ "shape":"Integer", - "documentation":"

The number of days to retain automated snapshots in the destination AWS Region after they are copied from the source AWS Region.

By default, this only changes the retention period of copied automated snapshots.

If you decrease the retention period for automated snapshots that are copied to a destination AWS Region, Amazon Redshift deletes any existing automated snapshots that were copied to the destination AWS Region and that fall outside of the new retention period.

Constraints: Must be at least 1 and no more than 35 for automated snapshots.

If you specify the manual option, only newly copied manual snapshots will have the new retention period.

If you specify the value of -1 newly copied manual snapshots are retained indefinitely.

Constraints: The number of days must be either -1 or an integer between 1 and 3,653 for manual snapshots.

" + "documentation":"

The number of days to retain automated snapshots in the destination Amazon Web Services Region after they are copied from the source Amazon Web Services Region.

By default, this only changes the retention period of copied automated snapshots.

If you decrease the retention period for automated snapshots that are copied to a destination Amazon Web Services Region, Amazon Redshift deletes any existing automated snapshots that were copied to the destination Amazon Web Services Region and that fall outside of the new retention period.

Constraints: Must be at least 1 and no more than 35 for automated snapshots.

If you specify the manual option, only newly copied manual snapshots will have the new retention period.

If you specify the value of -1 newly copied manual snapshots are retained indefinitely.

Constraints: The number of days must be either -1 or an integer between 1 and 3,653 for manual snapshots.

" }, "Manual":{ "shape":"Boolean", @@ -6975,7 +7614,7 @@ "members":{ "AccountId":{ "shape":"PartnerIntegrationAccountId", - "documentation":"

The AWS account ID that owns the cluster.

" + "documentation":"

The Amazon Web Services account ID that owns the cluster.

" }, "ClusterIdentifier":{ "shape":"PartnerIntegrationClusterIdentifier", @@ -7061,7 +7700,7 @@ "members":{ "MasterUserPassword":{ "shape":"String", - "documentation":"

The pending or in-progress change of the master user password for the cluster.

" + "documentation":"

The pending or in-progress change of the admin user password for the cluster.

" }, "NodeType":{ "shape":"String", @@ -7166,6 +7805,16 @@ "locationName":"RecurringCharge" } }, + "RejectDataShareMessage":{ + "type":"structure", + "required":["DataShareArn"], + "members":{ + "DataShareArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the datashare to reject.

" + } + } + }, "ReservedNode":{ "type":"structure", "members":{ @@ -7553,7 +8202,7 @@ "members":{ "ClusterIdentifier":{ "shape":"String", - "documentation":"

The identifier of the cluster that will be created from restoring the snapshot.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an AWS account.

" + "documentation":"

The identifier of the cluster that will be created from restoring the snapshot.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an Amazon Web Services account.

" }, "SnapshotIdentifier":{ "shape":"String", @@ -7585,7 +8234,7 @@ }, "OwnerAccount":{ "shape":"String", - "documentation":"

The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

" + "documentation":"

The Amazon Web Services account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

" }, "HsmClientCertificateIdentifier":{ "shape":"String", @@ -7625,7 +8274,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster that you restore from a shared snapshot.

" + "documentation":"

The Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster that you restore from a shared snapshot.

" }, "NodeType":{ "shape":"String", @@ -7641,7 +8290,7 @@ }, "IamRoles":{ "shape":"IamRoleArnList", - "documentation":"

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.

A cluster can have up to 10 IAM roles associated at any time.

" + "documentation":"

A list of Identity and Access Management (IAM) roles that can be used by the cluster to access other Amazon Web Services services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.

A cluster can have up to 10 IAM roles associated at any time.

" }, "MaintenanceTrackName":{ "shape":"String", @@ -7661,7 +8310,7 @@ }, "AquaConfigurationStatus":{ "shape":"AquaConfigurationStatus", - "documentation":"

The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. Possible values include the following.

  • enabled - Use AQUA if it is available for the current AWS Region and Amazon Redshift node type.

  • disabled - Don't use AQUA.

  • auto - Amazon Redshift determines whether to use AQUA.

" + "documentation":"

The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. Possible values include the following.

  • enabled - Use AQUA if it is available for the current Amazon Web Services Region and Amazon Redshift node type.

  • disabled - Don't use AQUA.

  • auto - Amazon Redshift determines whether to use AQUA.

" } }, "documentation":"

" @@ -7817,7 +8466,7 @@ }, "EC2SecurityGroupOwnerId":{ "shape":"String", - "documentation":"

The AWS account number of the owner of the security group specified in the EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must also be provided. and CIDRIP cannot be provided.

Example: 111122223333

" + "documentation":"

The Amazon Web Services account number of the owner of the security group specified in the EC2SecurityGroupName parameter. The Amazon Web Services access key ID is not an acceptable value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must also be provided, and CIDRIP cannot be provided.

Example: 111122223333

" } }, "documentation":"

" @@ -7837,7 +8486,7 @@ }, "Account":{ "shape":"String", - "documentation":"

The AWS account ID whose access is to be revoked.

" + "documentation":"

The Amazon Web Services account ID whose access is to be revoked.

" }, "VpcIds":{ "shape":"VpcIdentifierList", @@ -7866,7 +8515,7 @@ }, "AccountWithRestoreAccess":{ "shape":"String", - "documentation":"

The identifier of the AWS customer account that can no longer restore the specified snapshot.

" + "documentation":"

The identifier of the Amazon Web Services account that can no longer restore the specified snapshot.

" } }, "documentation":"

" @@ -8131,7 +8780,7 @@ "members":{ "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeScheduledActions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeScheduledActions request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" }, "ScheduledActions":{ "shape":"ScheduledActionList", @@ -8183,7 +8832,7 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

The master user name for the cluster.

" + "documentation":"

The admin user name for the cluster.

" }, "ClusterVersion":{ "shape":"String", @@ -8219,7 +8868,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.

" + "documentation":"

The Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.

" }, "EncryptedWithHSM":{ "shape":"Boolean", @@ -8227,11 +8876,11 @@ }, "AccountsWithRestoreAccess":{ "shape":"AccountsWithRestoreAccessList", - "documentation":"

A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.

" + "documentation":"

A list of the Amazon Web Services accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.

" }, "OwnerAccount":{ "shape":"String", - "documentation":"

For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.

" + "documentation":"

For manual snapshots, the Amazon Web Services account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.

" }, "TotalBackupSizeInMegaBytes":{ "shape":"Double", @@ -8346,14 +8995,14 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The unique identifier of the customer master key (CMK) in AWS KMS to which Amazon Redshift is granted permission.

" + "documentation":"

The unique identifier of the customer master key (CMK) in Amazon Web Services KMS to which Amazon Redshift is granted permission.

" }, "Tags":{ "shape":"TagList", "documentation":"

A list of tag instances.

" } }, - "documentation":"

The snapshot copy grant that grants Amazon Redshift permission to encrypt copied snapshots with the specified customer master key (CMK) from AWS KMS in the destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

", + "documentation":"

The snapshot copy grant that grants Amazon Redshift permission to encrypt copied snapshots with the specified customer master key (CMK) from Amazon Web Services KMS in the destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

", "wrapper":true }, "SnapshotCopyGrantAlreadyExistsFault":{ @@ -8380,7 +9029,7 @@ "members":{ "Marker":{ "shape":"String", - "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrant request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

" + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrant request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

" }, "SnapshotCopyGrants":{ "shape":"SnapshotCopyGrantList", @@ -8405,7 +9054,7 @@ "type":"structure", "members":{ }, - "documentation":"

The AWS account has exceeded the maximum number of snapshot copy grants in this region.

", + "documentation":"

The Amazon Web Services account has exceeded the maximum number of snapshot copy grants in this region.

", "error":{ "code":"SnapshotCopyGrantQuotaExceededFault", "httpStatusCode":400, @@ -9050,7 +9699,7 @@ "members":{ "AccountId":{ "shape":"PartnerIntegrationAccountId", - "documentation":"

The AWS account ID that owns the cluster.

" + "documentation":"

The Amazon Web Services account ID that owns the cluster.

" }, "ClusterIdentifier":{ "shape":"PartnerIntegrationClusterIdentifier", diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 7c18589a222b..7950908cdca7 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftdata/src/main/resources/codegen-resources/service-2.json b/services/redshiftdata/src/main/resources/codegen-resources/service-2.json index d0a4000bcc83..6ae75eaf9f79 100644 --- a/services/redshiftdata/src/main/resources/codegen-resources/service-2.json +++ b/services/redshiftdata/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,21 @@ "uid":"redshift-data-2019-12-20" }, "operations":{ + "BatchExecuteStatement":{ + "name":"BatchExecuteStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchExecuteStatementInput"}, + "output":{"shape":"BatchExecuteStatementOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ActiveStatementsExceededException"}, + {"shape":"BatchExecuteStatementException"} + ], + "documentation":"

Runs one or more SQL statements, which can be data manipulation language (DML) or data definition language (DDL). Depending on the authorization method, use one of the following combinations of request parameters:

  • Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" + }, "CancelStatement":{ "name":"CancelStatement", "http":{ @@ -22,6 +37,7 @@ "input":{"shape":"CancelStatementRequest"}, "output":{"shape":"CancelStatementResponse"}, "errors":[ + {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], @@ -54,7 +70,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:

  • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" + "documentation":"

Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:

  • Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" }, "ExecuteStatement":{ "name":"ExecuteStatement", @@ -69,7 +85,7 @@ {"shape":"ExecuteStatementException"}, {"shape":"ActiveStatementsExceededException"} ], - "documentation":"

Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:

  • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" + "documentation":"

Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:

  • Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" }, "GetStatementResult":{ "name":"GetStatementResult", @@ -98,7 +114,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

List the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:

  • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" + "documentation":"

List the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:

  • Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" }, "ListSchemas":{ "name":"ListSchemas", @@ -112,7 +128,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:

  • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" + "documentation":"

Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:

  • Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" }, "ListStatements":{ "name":"ListStatements", @@ -140,7 +156,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

List the tables in a database. If neither SchemaPattern nor TablePattern are specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:

  • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" + "documentation":"

List the tables in a database. If neither SchemaPattern nor TablePattern are specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:

  • Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.

  • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

" } }, "shapes":{ @@ -152,6 +168,90 @@ "documentation":"

The number of active statements exceeds the limit.

", "exception":true }, + "BatchExecuteStatementException":{ + "type":"structure", + "required":[ + "Message", + "StatementId" + ], + "members":{ + "Message":{"shape":"String"}, + "StatementId":{ + "shape":"String", + "documentation":"

Statement identifier of the exception.

" + } + }, + "documentation":"

An SQL statement encountered an environmental error while running.

", + "exception":true, + "fault":true + }, + "BatchExecuteStatementInput":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "Database", + "Sqls" + ], + "members":{ + "ClusterIdentifier":{ + "shape":"Location", + "documentation":"

The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" + }, + "Database":{ + "shape":"String", + "documentation":"

The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" + }, + "DbUser":{ + "shape":"String", + "documentation":"

The database user name. This parameter is required when authenticating using temporary credentials.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.

" + }, + "Sqls":{ + "shape":"SqlList", + "documentation":"

One or more SQL statements to run.

" + }, + "StatementName":{ + "shape":"StatementNameString", + "documentation":"

The name of the SQL statements. You can name the SQL statements when you create them to identify the query.

" + }, + "WithEvent":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether to send an event to the Amazon EventBridge event bus after the SQL statements run.

" + } + } + }, + "BatchExecuteStatementOutput":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{ + "shape":"Location", + "documentation":"

The cluster identifier.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time (UTC) the statement was created.

" + }, + "Database":{ + "shape":"String", + "documentation":"

The name of the database.

" + }, + "DbUser":{ + "shape":"String", + "documentation":"

The database user name.

" + }, + "Id":{ + "shape":"StatementId", + "documentation":"

The identifier of the SQL statement whose results are to be fetched. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. This identifier is returned by BatchExecuteStatement.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The name or ARN of the secret that enables access to the database.

" + } + } + }, "Blob":{"type":"blob"}, "Boolean":{ "type":"boolean", @@ -174,8 +274,8 @@ "required":["Id"], "members":{ "Id":{ - "shape":"UUID", - "documentation":"

The identifier of the SQL statement to cancel. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. This identifier is returned by ExecuteStatment and ListStatements.

" + "shape":"StatementId", + "documentation":"

The identifier of the SQL statement to cancel. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. This identifier is returned by BatchExecuteStatement, ExecuteStatement, and ListStatements.

" } } }, @@ -263,8 +363,8 @@ "required":["Id"], "members":{ "Id":{ - "shape":"UUID", - "documentation":"

The identifier of the SQL statement to describe. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. This identifier is returned by ExecuteStatment and ListStatements.

" + "shape":"StatementId", + "documentation":"

The identifier of the SQL statement to describe. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. A suffix indicates the number of the SQL statement. For example, d9b6c0c9-0747-4bf4-b142-e8883122f766:2 has a suffix of :2 that indicates the second SQL statement of a batch query. This identifier is returned by BatchExecuteStatement, ExecuteStatement, and ListStatements.

" } } }, @@ -301,7 +401,7 @@ "documentation":"

A value that indicates whether the statement has a result set. The result set can be empty.

" }, "Id":{ - "shape":"UUID", + "shape":"StatementId", "documentation":"

The identifier of the SQL statement described. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API.

" }, "QueryParameters":{ @@ -322,11 +422,11 @@ }, "ResultRows":{ "shape":"Long", - "documentation":"

Either the number of rows returned from the SQL statement or the number of rows affected. If result size is greater than zero, the result rows can be the number of rows affected by SQL statements such as INSERT, UPDATE, DELETE, COPY, and others.

" + "documentation":"

Either the number of rows returned from the SQL statement or the number of rows affected. If result size is greater than zero, the result rows can be the number of rows affected by SQL statements such as INSERT, UPDATE, DELETE, COPY, and others. A -1 indicates the value is null.

" }, "ResultSize":{ "shape":"Long", - "documentation":"

The size in bytes of the returned results.

" + "documentation":"

The size in bytes of the returned results. A -1 indicates the value is null.

" }, "SecretArn":{ "shape":"SecretArn", @@ -336,6 +436,10 @@ "shape":"StatusString", "documentation":"

The status of the SQL statement being described. Status values are defined as follows:

  • ABORTED - The query run was stopped by the user.

  • ALL - A status value that includes all query statuses. This value can be used to filter results.

  • FAILED - The query run failed.

  • FINISHED - The query has finished running.

  • PICKED - The query has been chosen to be run.

  • STARTED - The query run has started.

  • SUBMITTED - The query was submitted, but not yet processed.

" }, + "SubStatements":{ + "shape":"SubStatementList", + "documentation":"

The SQL statements from a multiple statement run.

" + }, "UpdatedAt":{ "shape":"Timestamp", "documentation":"

The date and time (UTC) that the metadata for the SQL statement was last updated. An example is the time the status last changed.

" @@ -351,7 +455,7 @@ "members":{ "ClusterIdentifier":{ "shape":"Location", - "documentation":"

The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

" + "documentation":"

The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" }, "ConnectedDatabase":{ "shape":"String", @@ -379,7 +483,7 @@ }, "SecretArn":{ "shape":"SecretArn", - "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

" + "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.

" }, "Table":{ "shape":"String", @@ -428,16 +532,17 @@ "type":"structure", "required":[ "ClusterIdentifier", + "Database", "Sql" ], "members":{ "ClusterIdentifier":{ "shape":"Location", - "documentation":"

The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

" + "documentation":"

The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" }, "Database":{ "shape":"String", - "documentation":"

The name of the database. This parameter is required when authenticating using temporary credentials.

" + "documentation":"

The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" }, "DbUser":{ "shape":"String", @@ -449,7 +554,7 @@ }, "SecretArn":{ "shape":"SecretArn", - "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

" + "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.

" }, "Sql":{ "shape":"StatementString", @@ -485,8 +590,8 @@ "documentation":"

The database user name.

" }, "Id":{ - "shape":"UUID", - "documentation":"

The identifier of the statement to be run. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API.

" + "shape":"StatementId", + "documentation":"

The identifier of the SQL statement whose results are to be fetched. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API.

" }, "SecretArn":{ "shape":"SecretArn", @@ -534,8 +639,8 @@ "required":["Id"], "members":{ "Id":{ - "shape":"UUID", - "documentation":"

The identifier of the SQL statement whose results are to be fetched. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. This identifier is returned by ExecuteStatment and ListStatements.

" + "shape":"StatementId", + "documentation":"

The identifier of the SQL statement whose results are to be fetched. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. A suffix indicates the number of the SQL statement. For example, d9b6c0c9-0747-4bf4-b142-e8883122f766:2 has a suffix of :2 that indicates the second SQL statement of a batch query. This identifier is returned by BatchExecuteStatement, ExecuteStatement, and ListStatements.

" }, "NextToken":{ "shape":"String", @@ -581,15 +686,18 @@ }, "ListDatabasesRequest":{ "type":"structure", - "required":["ClusterIdentifier"], + "required":[ + "ClusterIdentifier", + "Database" + ], "members":{ "ClusterIdentifier":{ "shape":"Location", - "documentation":"

The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

" + "documentation":"

The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" }, "Database":{ "shape":"String", - "documentation":"

The name of the database. This parameter is required when authenticating using temporary credentials.

" + "documentation":"

The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" }, "DbUser":{ "shape":"String", @@ -605,7 +713,7 @@ }, "SecretArn":{ "shape":"SecretArn", - "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

" + "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.

" } } }, @@ -631,7 +739,7 @@ "members":{ "ClusterIdentifier":{ "shape":"Location", - "documentation":"

The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

" + "documentation":"

The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" }, "ConnectedDatabase":{ "shape":"String", @@ -659,7 +767,7 @@ }, "SecretArn":{ "shape":"SecretArn", - "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

" + "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.

" } } }, @@ -698,7 +806,7 @@ }, "StatementName":{ "shape":"StatementNameString", - "documentation":"

The name of the SQL statement specified as input to ExecuteStatement to identify the query. You can list multiple statements by providing a prefix that matches the beginning of the statement name. For example, to list myStatement1, myStatement2, myStatement3, and so on, then provide the a value of myStatement. Data API does a case-sensitive match of SQL statement names to the prefix value you provide.

" + "documentation":"

The name of the SQL statement specified as input to BatchExecuteStatement or ExecuteStatement to identify the query. You can list multiple statements by providing a prefix that matches the beginning of the statement name. For example, to list myStatement1, myStatement2, myStatement3, and so on, then provide a value of myStatement. Data API does a case-sensitive match of SQL statement names to the prefix value you provide.

" }, "Status":{ "shape":"StatusString", @@ -729,7 +837,7 @@ "members":{ "ClusterIdentifier":{ "shape":"Location", - "documentation":"

The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

" + "documentation":"

The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

" }, "ConnectedDatabase":{ "shape":"String", @@ -757,7 +865,7 @@ }, "SecretArn":{ "shape":"SecretArn", - "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

" + "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.

" }, "TablePattern":{ "shape":"String", @@ -817,6 +925,12 @@ "member":{"shape":"String"} }, "SecretArn":{"type":"string"}, + "SqlList":{ + "type":"list", + "member":{"shape":"StatementString"}, + "max":40, + "min":1 + }, "SqlParameter":{ "type":"structure", "required":[ @@ -853,9 +967,13 @@ "documentation":"

The date and time (UTC) the statement was created.

" }, "Id":{ - "shape":"UUID", + "shape":"StatementId", "documentation":"

The SQL statement identifier. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API.

" }, + "IsBatchStatement":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the statement is a batch query request.

" + }, "QueryParameters":{ "shape":"SqlParametersList", "documentation":"

The parameters used in a SQL statement.

" @@ -864,6 +982,10 @@ "shape":"StatementString", "documentation":"

The SQL statement.

" }, + "QueryStrings":{ + "shape":"StatementStringList", + "documentation":"

One or more SQL statements. Each query string in the array corresponds to one of the queries in a batch query request.

" + }, "SecretArn":{ "shape":"SecretArn", "documentation":"

The name or Amazon Resource Name (ARN) of the secret that enables access to the database.

" @@ -883,6 +1005,10 @@ }, "documentation":"

The SQL statement to run.

" }, + "StatementId":{ + "type":"string", + "pattern":"^[a-z0-9]{8}(-[a-z0-9]{4}){3}-[a-z0-9]{12}(:\\d+)?$" + }, "StatementList":{ "type":"list", "member":{"shape":"StatementData"} @@ -892,7 +1018,22 @@ "max":500, "min":0 }, + "StatementStatusString":{ + "type":"string", + "enum":[ + "SUBMITTED", + "PICKED", + "STARTED", + "FINISHED", + "ABORTED", + "FAILED" + ] + }, "StatementString":{"type":"string"}, + "StatementStringList":{ + "type":"list", + "member":{"shape":"StatementString"} + }, "StatusString":{ "type":"string", "enum":[ @@ -906,6 +1047,61 @@ ] }, "String":{"type":"string"}, + "SubStatementData":{ + "type":"structure", + "required":["Id"], + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time (UTC) the statement was created.

" + }, + "Duration":{ + "shape":"Long", + "documentation":"

The amount of time in nanoseconds that the statement ran.

" + }, + "Error":{ + "shape":"String", + "documentation":"

The error message from the cluster if the SQL statement encountered an error while running.

" + }, + "HasResultSet":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the statement has a result set. The result set can be empty.

" + }, + "Id":{ + "shape":"StatementId", + "documentation":"

The identifier of the SQL statement. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. A suffix indicates the number of the SQL statement. For example, d9b6c0c9-0747-4bf4-b142-e8883122f766:2 has a suffix of :2 that indicates the second SQL statement of a batch query.

" + }, + "QueryString":{ + "shape":"StatementString", + "documentation":"

The SQL statement text.

" + }, + "RedshiftQueryId":{ + "shape":"Long", + "documentation":"

The query identifier that Amazon Redshift assigns to the SQL statement when it runs.

" + }, + "ResultRows":{ + "shape":"Long", + "documentation":"

Either the number of rows returned from the SQL statement or the number of rows affected. If result size is greater than zero, the result rows can be the number of rows affected by SQL statements such as INSERT, UPDATE, DELETE, COPY, and others. A -1 indicates the value is null.

" + }, + "ResultSize":{ + "shape":"Long", + "documentation":"

The size in bytes of the returned results. A -1 indicates the value is null.

" + }, + "Status":{ + "shape":"StatementStatusString", + "documentation":"

The status of the SQL statement. An example is that the SQL statement finished.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time (UTC) that the statement metadata was last updated.

" + } + }, + "documentation":"

Information about an SQL statement.

" + }, + "SubStatementList":{ + "type":"list", + "member":{"shape":"SubStatementData"} + }, "TableList":{ "type":"list", "member":{"shape":"TableMember"} @@ -929,7 +1125,6 @@ "documentation":"

The properties of a table.

" }, "Timestamp":{"type":"timestamp"}, - "UUID":{"type":"string"}, "ValidationException":{ "type":"structure", "members":{ @@ -943,5 +1138,5 @@ }, "bool":{"type":"boolean"} }, - "documentation":"

You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run individual SQL statements, which are committed if the statement succeeds.

For more information about the Amazon Redshift Data API, see Using the Amazon Redshift Data API in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run SQL statements, which are committed if the statement succeeds.

For more information about the Amazon Redshift Data API, see Using the Amazon Redshift Data API in the Amazon Redshift Cluster Management Guide.

" } diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index a8e164fc77ce..f0c7a3eb5ab0 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/service-2.json b/services/rekognition/src/main/resources/codegen-resources/service-2.json index 6a7eb8633bfe..962730c478fe 100644 --- a/services/rekognition/src/main/resources/codegen-resources/service-2.json +++ b/services/rekognition/src/main/resources/codegen-resources/service-2.json @@ -399,7 +399,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects text in the input image and converts it into machine-readable text.

Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 50 words in an image.

A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

To be detected, text must be within +/- 90 degrees orientation of the horizontal axis.

For more information, see DetectText in the Amazon Rekognition Developer Guide.

" + "documentation":"

Detects text in the input image and converts it into machine-readable text.

Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 100 words in an image.

A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

To be detected, text must be within +/- 90 degrees orientation of the horizontal axis.

For more information, see DetectText in the Amazon Rekognition Developer Guide.

" }, "GetCelebrityInfo":{ "name":"GetCelebrityInfo", @@ -455,7 +455,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the unsafe content analysis results for a Amazon Rekognition Video analysis started by StartContentModeration.

Unsafe content analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the unsafe content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Working with Stored Videos in the Amazon Rekognition Devlopers Guide.

GetContentModeration returns detected unsafe content labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" + "documentation":"

Gets the inappropriate, unwanted, or offensive content analysis results for an Amazon Rekognition Video analysis started by StartContentModeration. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs.

Amazon Rekognition Video inappropriate or offensive content detection in a stored video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Working with Stored Videos in the Amazon Rekognition Developers Guide.

GetContentModeration returns detected inappropriate, unwanted, or offensive content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Content moderation in the Amazon Rekognition Developer Guide.

" }, "GetFaceDetection":{ "name":"GetFaceDetection", @@ -768,7 +768,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous detection of unsafe content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When unsafe content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the unsafe content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

", + "documentation":"

Starts asynchronous detection of inappropriate, unwanted, or offensive content in a stored video. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Content moderation in the Amazon Rekognition Developer Guide.

", "idempotent":true }, "StartFaceDetection":{ @@ -1103,6 +1103,20 @@ }, "documentation":"

Indicates whether or not the face has a beard, and the confidence level in the determination.

" }, + "BlackFrame":{ + "type":"structure", + "members":{ + "MaxPixelThreshold":{ + "shape":"MaxPixelThreshold", + "documentation":"

A threshold used to determine the maximum luminance value for a pixel to be considered black. In a full color range video, luminance values range from 0-255. A pixel value of 0 is pure black, and the most strict filter. The maximum black pixel value is computed as follows: max_black_pixel_value = minimum_luminance + MaxPixelThreshold *luminance_range.

For example, for a full range video with MaxPixelThreshold = 0.1, max_black_pixel_value is 0 + 0.1 * (255-0) = 25.5.

The default value of MaxPixelThreshold is 0.2, which maps to a max_black_pixel_value of 51 for a full range video. You can lower this threshold to be more strict on black levels.

" + }, + "MinCoveragePercentage":{ + "shape":"MinCoveragePercentage", + "documentation":"

The minimum percentage of pixels in a frame that need to have a luminance below the max_black_pixel_value for a frame to be considered a black frame. Luminance is calculated using the BT.709 matrix.

The default value is 99, which means at least 99% of all pixels in the frame are black pixels as per the MaxPixelThreshold set. You can reduce this value to allow more noise on the black frame.

" + } + }, + "documentation":"

A filter that allows you to control the black frame detection by specifying the black levels and pixel coverage of black pixels in a frame. As videos can come from multiple sources, formats, and time periods, they may contain different standards and varying noise levels for black frames that need to be accounted for. For more information, see StartSegmentDetection.

" + }, "BodyPart":{ "type":"string", "enum":[ @@ -1383,14 +1397,14 @@ "members":{ "Timestamp":{ "shape":"Timestamp", - "documentation":"

Time, in milliseconds from the beginning of the video, that the unsafe content label was detected.

" + "documentation":"

Time, in milliseconds from the beginning of the video, that the content moderation label was detected.

" }, "ModerationLabel":{ "shape":"ModerationLabel", - "documentation":"

The unsafe content label detected by in the stored video.

" + "documentation":"

The content moderation label detected in the stored video.

" } }, - "documentation":"

Information about an unsafe content label detection in a stored video.

" + "documentation":"

Information about an inappropriate, unwanted, or offensive content label detection in a stored video.

" }, "ContentModerationDetections":{ "type":"list", @@ -1487,7 +1501,7 @@ }, "OutputConfig":{ "shape":"OutputConfig", - "documentation":"

The Amazon S3 location to store the results of training.

" + "documentation":"

The Amazon S3 bucket location to store the results of training. The S3 bucket can be in any AWS account as long as the caller has s3:PutObject permissions on the S3 bucket.

" }, "TrainingData":{ "shape":"TrainingData", @@ -1503,7 +1517,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier for your AWS Key Management Service (AWS KMS) customer master key (CMK). You can supply the Amazon Resource Name (ARN) of your CMK, the ID of your CMK, or an alias for your CMK. The key is used to encrypt training and test images copied into the service for model training. Your source images are unaffected. The key is also used to encrypt training results and manifest files written to the output Amazon S3 bucket (OutputConfig).

If you don't specify a value for KmsKeyId, images copied into the service are encrypted using a key that AWS owns and manages.

" + "documentation":"

The identifier for your AWS Key Management Service (AWS KMS) customer master key (CMK). You can supply the Amazon Resource Name (ARN) of your CMK, the ID of your CMK, an alias for your CMK, or an alias ARN. The key is used to encrypt training and test images copied into the service for model training. Your source images are unaffected. The key is also used to encrypt training results and manifest files written to the output Amazon S3 bucket (OutputConfig).

If you choose to use your own CMK, you need the following permissions on the CMK.

  • kms:CreateGrant

  • kms:DescribeKey

  • kms:GenerateDataKey

  • kms:Decrypt

If you don't specify a value for KmsKeyId, images copied into the service are encrypted using a key that AWS owns and manages.

" } } }, @@ -2471,7 +2485,7 @@ "members":{ "JobId":{ "shape":"JobId", - "documentation":"

The identifier for the unsafe content job. Use JobId to identify the job in a subsequent call to GetContentModeration.

" + "documentation":"

The identifier for the inappropriate, unwanted, or offensive content moderation job. Use JobId to identify the job in a subsequent call to GetContentModeration.

" }, "MaxResults":{ "shape":"MaxResults", @@ -2479,7 +2493,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of unsafe content labels.

" + "documentation":"

If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of content moderation labels.

" }, "SortBy":{ "shape":"ContentModerationSortBy", @@ -2492,7 +2506,7 @@ "members":{ "JobStatus":{ "shape":"VideoJobStatus", - "documentation":"

The current status of the unsafe content analysis job.

" + "documentation":"

The current status of the content moderation analysis job.

" }, "StatusMessage":{ "shape":"StatusMessage", @@ -2504,15 +2518,15 @@ }, "ModerationLabels":{ "shape":"ContentModerationDetections", - "documentation":"

The detected unsafe content labels and the time(s) they were detected.

" + "documentation":"

The detected inappropriate, unwanted, or offensive content moderation labels and the time(s) they were detected.

" }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of unsafe content labels.

" + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of content moderation labels.

" }, "ModerationModelVersion":{ "shape":"String", - "documentation":"

Version number of the moderation detection model that was used to detect unsafe content.

" + "documentation":"

Version number of the moderation detection model that was used to detect inappropriate, unwanted, or offensive content.

" } } }, @@ -3339,10 +3353,20 @@ "type":"integer", "min":1 }, + "MaxPixelThreshold":{ + "type":"float", + "max":1, + "min":0 + }, "MaxResults":{ "type":"integer", "min":1 }, + "MinCoveragePercentage":{ + "type":"float", + "max":100, + "min":0 + }, "ModerationLabel":{ "type":"structure", "members":{ @@ -3359,7 +3383,7 @@ "documentation":"

The name for the parent label. Labels at the top level of the hierarchy have the parent label \"\".

" } }, - "documentation":"

Provides information about a single type of unsafe content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" + "documentation":"

Provides information about a single type of inappropriate, unwanted, or offensive content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Content moderation in the Amazon Rekognition Developer Guide.

" }, "ModerationLabels":{ "type":"list", @@ -3409,7 +3433,7 @@ "documentation":"

The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.

" } }, - "documentation":"

The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see api-video.

" + "documentation":"

The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see api-video. Note that the Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic. For more information, see Giving access to multiple Amazon SNS topics.

" }, "OrientationCorrection":{ "type":"string", @@ -4088,6 +4112,18 @@ "ShotSegment":{ "shape":"ShotSegment", "documentation":"

If the segment is a shot detection, contains information about the shot detection.

" + }, + "StartFrameNumber":{ + "shape":"ULong", + "documentation":"

The frame number of the start of a video segment, using a frame index that starts with 0.

" + }, + "EndFrameNumber":{ + "shape":"ULong", + "documentation":"

The frame number at the end of a video segment, using a frame index that starts with 0.

" + }, + "DurationFrames":{ + "shape":"ULong", + "documentation":"

The duration of a video segment, expressed in frames.

" } }, "documentation":"

A technical cue or shot detection segment detected in a video. An array of SegmentDetection objects containing all segments detected in a stored video is returned by GetSegmentDetection.

" @@ -4175,7 +4211,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the celebrity recognition analysis to.

" + "documentation":"

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the celebrity recognition analysis to. The Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy.

" }, "JobTag":{ "shape":"JobTag", @@ -4198,7 +4234,7 @@ "members":{ "Video":{ "shape":"Video", - "documentation":"

The video in which you want to detect unsafe content. The video must be stored in an Amazon S3 bucket.

" + "documentation":"

The video in which you want to detect inappropriate, unwanted, or offensive content. The video must be stored in an Amazon S3 bucket.

" }, "MinConfidence":{ "shape":"Percent", @@ -4210,7 +4246,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the unsafe content analysis to.

" + "documentation":"

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the content analysis to. The Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic.

" }, "JobTag":{ "shape":"JobTag", @@ -4223,7 +4259,7 @@ "members":{ "JobId":{ "shape":"JobId", - "documentation":"

The identifier for the unsafe content analysis job. Use JobId to identify the job in a subsequent call to GetContentModeration.

" + "documentation":"

The identifier for the content analysis job. Use JobId to identify the job in a subsequent call to GetContentModeration.

" } } }, @@ -4241,7 +4277,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation.

" + "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation. The Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy.

" }, "FaceAttributes":{ "shape":"FaceAttributes", @@ -4287,7 +4323,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the search.

" + "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the search. The Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic.

" }, "JobTag":{ "shape":"JobTag", @@ -4322,7 +4358,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the label detection operation to.

" + "documentation":"

The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the label detection operation to. The Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy.

" }, "JobTag":{ "shape":"JobTag", @@ -4353,7 +4389,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the people detection operation to.

" + "documentation":"

The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the people detection operation to. The Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy.

" }, "JobTag":{ "shape":"JobTag", @@ -4424,7 +4460,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the segment detection operation.

" + "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the segment detection operation. Note that the Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic.

" }, "JobTag":{ "shape":"JobTag", @@ -4480,6 +4516,10 @@ "MinSegmentConfidence":{ "shape":"SegmentConfidence", "documentation":"

Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level lower than this specified value.

If you don't specify MinSegmentConfidence, GetSegmentDetection returns segments with confidence values greater than or equal to 50 percent.

" + }, + "BlackFrame":{ + "shape":"BlackFrame", + "documentation":"

A filter that allows you to control the black frame detection by specifying the black levels and pixel coverage of black pixels in a frame. Videos can come from multiple sources, formats, and time periods, with different standards and varying noise levels for black frames that need to be accounted for.

" } }, "documentation":"

Filters for the technical segments returned by GetSegmentDetection. For more information, see StartSegmentDetectionFilters.

" @@ -4718,7 +4758,11 @@ "enum":[ "ColorBars", "EndCredits", - "BlackFrames" + "BlackFrames", + "OpeningCredits", + "StudioLogo", + "Slate", + "Content" ] }, "TestingData":{ @@ -4935,6 +4979,13 @@ }, "documentation":"

Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as StartLabelDetection use Video to specify a video for analysis. The supported file formats are .mp4, .mov and .avi.

" }, + "VideoColorRange":{ + "type":"string", + "enum":[ + "FULL", + "LIMITED" + ] + }, "VideoJobStatus":{ "type":"string", "enum":[ @@ -4969,6 +5020,10 @@ "FrameWidth":{ "shape":"ULong", "documentation":"

Horizontal pixel dimension of the video.

" + }, + "ColorRange":{ + "shape":"VideoColorRange", + "documentation":"

A description of the range of luminance values in a video, either LIMITED (16 to 235) or FULL (0 to 255).

" } }, "documentation":"

Information about a video that Amazon Rekognition analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition video operation.

" diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index e14afcea640e..99349a7566a9 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index 62881cb69f26..4c329dd8f944 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index a3e54f37f401..e7fc232a5b63 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/robomaker/src/main/resources/codegen-resources/service-2.json b/services/robomaker/src/main/resources/codegen-resources/service-2.json index a937626ef7fe..deeef7245472 100644 --- a/services/robomaker/src/main/resources/codegen-resources/service-2.json +++ b/services/robomaker/src/main/resources/codegen-resources/service-2.json @@ -2779,6 +2779,10 @@ "tags":{ "shape":"TagMap", "documentation":"

A map that contains tag keys and tag values that are attached to the world.

" + }, + "worldDescriptionBody":{ + "shape":"Json", + "documentation":"

Returns the JSON formatted string that describes the contents of your world.

" } } }, @@ -2818,6 +2822,10 @@ "tags":{ "shape":"TagMap", "documentation":"

A map that contains tag keys and tag values that are attached to the world template.

" + }, + "version":{ + "shape":"GenericString", + "documentation":"

The version of the world template that you're using.

" } } }, @@ -4670,6 +4678,10 @@ "name":{ "shape":"TemplateName", "documentation":"

The name of the template.

" + }, + "version":{ + "shape":"GenericString", + "documentation":"

The version of the template that you're using.

" } }, "documentation":"

Summary information for a template.

" diff --git a/services/route53/pom.xml b/services/route53/pom.xml index 7f18abb389f8..b26936396436 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53/src/main/resources/codegen-resources/service-2.json b/services/route53/src/main/resources/codegen-resources/service-2.json index d62154bcb3a3..e740e45c3145 100644 --- a/services/route53/src/main/resources/codegen-resources/service-2.json +++ b/services/route53/src/main/resources/codegen-resources/service-2.json @@ -25,7 +25,8 @@ {"shape":"NoSuchKeySigningKey"}, {"shape":"InvalidKeySigningKeyStatus"}, {"shape":"InvalidSigningStatus"}, - {"shape":"InvalidKMSArn"} + {"shape":"InvalidKMSArn"}, + {"shape":"InvalidInput"} ], "documentation":"

Activates a key-signing key (KSK) so that it can be used for signing by DNSSEC. This operation changes the KSK status to ACTIVE.

" }, @@ -51,7 +52,7 @@ {"shape":"LimitsExceeded"}, {"shape":"PriorRequestNotComplete"} ], - "documentation":"

Associates an Amazon VPC with a private hosted zone.

To perform the association, the VPC and the private hosted zone must already exist. You can't convert a public hosted zone into a private hosted zone.

If you want to associate a VPC that was created by using one AWS account with a private hosted zone that was created by using a different account, the AWS account that created the private hosted zone must first submit a CreateVPCAssociationAuthorization request. Then the account that created the VPC must submit an AssociateVPCWithHostedZone request.

" + "documentation":"

Associates an Amazon VPC with a private hosted zone.

To perform the association, the VPC and the private hosted zone must already exist. You can't convert a public hosted zone into a private hosted zone.

If you want to associate a VPC that was created by using one account with a private hosted zone that was created by using a different account, the account that created the private hosted zone must first submit a CreateVPCAssociationAuthorization request. Then the account that created the VPC must submit an AssociateVPCWithHostedZone request.

" }, "ChangeResourceRecordSets":{ "name":"ChangeResourceRecordSets", @@ -72,7 +73,7 @@ {"shape":"InvalidInput"}, {"shape":"PriorRequestNotComplete"} ], - "documentation":"

Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.

Deleting Resource Record Sets

To delete a resource record set, you must specify all the same values that you specified when you created it.

Change Batches and Transactional Changes

The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.

For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.

If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.

Traffic Flow

To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.

Create, Delete, and Upsert

Use ChangeResourceRecordsSetsRequest to perform the following actions:

  • CREATE: Creates a resource record set that has the specified values.

  • DELETE: Deletes an existing resource record set that has the specified values.

  • UPSERT: If a resource record set does not already exist, AWS creates it. If a resource set does exist, Route 53 updates it with the values in the request.

Syntaxes for Creating, Updating, and Deleting Resource Record Sets

The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.

For an example for each type of resource record set, see \"Examples.\"

Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.

Change Propagation to Route 53 DNS Servers

When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.

Limits on ChangeResourceRecordSets Requests

For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.

" + "documentation":"

Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.

Deleting Resource Record Sets

To delete a resource record set, you must specify all the same values that you specified when you created it.

Change Batches and Transactional Changes

The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.

For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.

If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.

Traffic Flow

To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.

Create, Delete, and Upsert

Use ChangeResourceRecordSetsRequest to perform the following actions:

  • CREATE: Creates a resource record set that has the specified values.

  • DELETE: Deletes an existing resource record set that has the specified values.

  • UPSERT: If a resource record set does not already exist, Amazon Web Services creates it. If a resource record set does exist, Route 53 updates it with the values in the request.

Syntaxes for Creating, Updating, and Deleting Resource Record Sets

The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.

For an example for each type of resource record set, see \"Examples.\"

Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.

Change Propagation to Route 53 DNS Servers

When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.

Limits on ChangeResourceRecordSets Requests

For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.

" }, "ChangeTagsForResource":{ "name":"ChangeTagsForResource", @@ -93,7 +94,7 @@ {"shape":"PriorRequestNotComplete"}, {"shape":"ThrottlingException"} ], - "documentation":"

Adds, edits, or deletes tags for a health check or a hosted zone.

For information about using tags for cost allocation, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

Adds, edits, or deletes tags for a health check or a hosted zone.

For information about using tags for cost allocation, see Using Cost Allocation Tags in the Billing and Cost Management User Guide.

" }, "CreateHealthCheck":{ "name":"CreateHealthCheck", @@ -189,7 +190,7 @@ {"shape":"QueryLoggingConfigAlreadyExists"}, {"shape":"InsufficientCloudWatchLogsResourcePolicy"} ], - "documentation":"

Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.

DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:

  • Route 53 edge location that responded to the DNS query

  • Domain or subdomain that was requested

  • DNS record type, such as A or AAAA

  • DNS response code, such as NoError or ServFail

Log Group and Resource Policy

Before you create a query logging configuration, perform the following operations.

If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically.

  1. Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:

    • You must create the log group in the us-east-1 region.

    • You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.

    • When you create log groups for query logging, we recommend that you use a consistent prefix, for example:

      /aws/route53/hosted zone name

      In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.

  2. Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of Resource, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *, for example:

    arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*

    You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI.

Log Streams and Edge Locations

When Route 53 finishes creating the configuration for DNS query logging, it does the following:

  • Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.

  • Begins to send query logs to the applicable log stream.

The name of each log stream is in the following format:

hosted zone ID/edge location code

The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page.

Queries That Are Logged

Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide.

Log File Format

For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide.

Pricing

For information about charges for query logs, see Amazon CloudWatch Pricing.

How to Stop Logging

If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig.

" + "documentation":"

Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.

DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:

  • Route 53 edge location that responded to the DNS query

  • Domain or subdomain that was requested

  • DNS record type, such as A or AAAA

  • DNS response code, such as NoError or ServFail

Log Group and Resource Policy

Before you create a query logging configuration, perform the following operations.

If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically.

  1. Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:

    • You must create the log group in the us-east-1 region.

    • You must use the same account to create the log group and the hosted zone that you want to configure query logging for.

    • When you create log groups for query logging, we recommend that you use a consistent prefix, for example:

      /aws/route53/hosted zone name

      In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated Amazon Web Services resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.

  2. Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of Resource, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *, for example:

    arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*

    You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the Amazon Web Services SDKs, or the CLI.

Log Streams and Edge Locations

When Route 53 finishes creating the configuration for DNS query logging, it does the following:

  • Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.

  • Begins to send query logs to the applicable log stream.

The name of each log stream is in the following format:

hosted zone ID/edge location code

The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page.

Queries That Are Logged

Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide.

Log File Format

For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide.

Pricing

For information about charges for query logs, see Amazon CloudWatch Pricing.

How to Stop Logging

If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig.

" }, "CreateReusableDelegationSet":{ "name":"CreateReusableDelegationSet", @@ -213,7 +214,7 @@ {"shape":"DelegationSetNotAvailable"}, {"shape":"DelegationSetAlreadyReusable"} ], - "documentation":"

Creates a delegation set (a group of four name servers) that can be reused by multiple hosted zones that were created by the same AWS account.

You can also create a reusable delegation set that uses the four name servers that are associated with an existing hosted zone. Specify the hosted zone ID in the CreateReusableDelegationSet request.

You can't associate a reusable delegation set with a private hosted zone.

For information about using a reusable delegation set to configure white label name servers, see Configuring White Label Name Servers.

The process for migrating existing hosted zones to use a reusable delegation set is comparable to the process for configuring white label name servers. You need to perform the following steps:

  1. Create a reusable delegation set.

  2. Recreate hosted zones, and reduce the TTL to 60 seconds or less.

  3. Recreate resource record sets in the new hosted zones.

  4. Change the registrar's name servers to use the name servers for the new hosted zones.

  5. Monitor traffic for the website or application.

  6. Change TTLs back to their original values.

If you want to migrate existing hosted zones to use a reusable delegation set, the existing hosted zones can't use any of the name servers that are assigned to the reusable delegation set. If one or more hosted zones do use one or more name servers that are assigned to the reusable delegation set, you can do one of the following:

  • For small numbers of hosted zones—up to a few hundred—it's relatively easy to create reusable delegation sets until you get one that has four name servers that don't overlap with any of the name servers in your hosted zones.

  • For larger numbers of hosted zones, the easiest solution is to use more than one reusable delegation set.

  • For larger numbers of hosted zones, you can also migrate hosted zones that have overlapping name servers to hosted zones that don't have overlapping name servers, then migrate the hosted zones again to use the reusable delegation set.

" + "documentation":"

Creates a delegation set (a group of four name servers) that can be reused by multiple hosted zones that were created by the same account.

You can also create a reusable delegation set that uses the four name servers that are associated with an existing hosted zone. Specify the hosted zone ID in the CreateReusableDelegationSet request.

You can't associate a reusable delegation set with a private hosted zone.

For information about using a reusable delegation set to configure white label name servers, see Configuring White Label Name Servers.

The process for migrating existing hosted zones to use a reusable delegation set is comparable to the process for configuring white label name servers. You need to perform the following steps:

  1. Create a reusable delegation set.

  2. Recreate hosted zones, and reduce the TTL to 60 seconds or less.

  3. Recreate resource record sets in the new hosted zones.

  4. Change the registrar's name servers to use the name servers for the new hosted zones.

  5. Monitor traffic for the website or application.

  6. Change TTLs back to their original values.

If you want to migrate existing hosted zones to use a reusable delegation set, the existing hosted zones can't use any of the name servers that are assigned to the reusable delegation set. If one or more hosted zones do use one or more name servers that are assigned to the reusable delegation set, you can do one of the following:

  • For small numbers of hosted zones—up to a few hundred—it's relatively easy to create reusable delegation sets until you get one that has four name servers that don't overlap with any of the name servers in your hosted zones.

  • For larger numbers of hosted zones, the easiest solution is to use more than one reusable delegation set.

  • For larger numbers of hosted zones, you can also migrate hosted zones that have overlapping name servers to hosted zones that don't have overlapping name servers, then migrate the hosted zones again to use the reusable delegation set.

" }, "CreateTrafficPolicy":{ "name":"CreateTrafficPolicy", @@ -299,7 +300,7 @@ {"shape":"InvalidVPCId"}, {"shape":"InvalidInput"} ], - "documentation":"

Authorizes the AWS account that created a specified VPC to submit an AssociateVPCWithHostedZone request to associate the VPC with a specified hosted zone that was created by a different account. To submit a CreateVPCAssociationAuthorization request, you must use the account that created the hosted zone. After you authorize the association, use the account that created the VPC to submit an AssociateVPCWithHostedZone request.

If you want to associate multiple VPCs that you created by using one account with a hosted zone that you created by using a different account, you must submit one authorization request for each VPC.

" + "documentation":"

Authorizes the account that created a specified VPC to submit an AssociateVPCWithHostedZone request to associate the VPC with a specified hosted zone that was created by a different account. To submit a CreateVPCAssociationAuthorization request, you must use the account that created the hosted zone. After you authorize the association, use the account that created the VPC to submit an AssociateVPCWithHostedZone request.

If you want to associate multiple VPCs that you created by using one account with a hosted zone that you created by using a different account, you must submit one authorization request for each VPC.

" }, "DeactivateKeySigningKey":{ "name":"DeactivateKeySigningKey", @@ -315,7 +316,8 @@ {"shape":"InvalidKeySigningKeyStatus"}, {"shape":"InvalidSigningStatus"}, {"shape":"KeySigningKeyInUse"}, - {"shape":"KeySigningKeyInParentDSRecord"} + {"shape":"KeySigningKeyInParentDSRecord"}, + {"shape":"InvalidInput"} ], "documentation":"

Deactivates a key-signing key (KSK) so that it will not be used for signing by DNSSEC. This operation changes the KSK status to INACTIVE.

" }, @@ -332,7 +334,7 @@ {"shape":"HealthCheckInUse"}, {"shape":"InvalidInput"} ], - "documentation":"

Deletes a health check.

Amazon Route 53 does not prevent you from deleting a health check even if the health check is associated with one or more resource record sets. If you delete a health check and you don't update the associated resource record sets, the future status of the health check can't be predicted and may change. This will affect the routing of DNS queries for your DNS failover configuration. For more information, see Replacing and Deleting Health Checks in the Amazon Route 53 Developer Guide.

If you're using AWS Cloud Map and you configured Cloud Map to create a Route 53 health check when you register an instance, you can't use the Route 53 DeleteHealthCheck command to delete the health check. The health check is deleted automatically when you deregister the instance; there can be a delay of several hours before the health check is deleted from Route 53.

" + "documentation":"

Deletes a health check.

Amazon Route 53 does not prevent you from deleting a health check even if the health check is associated with one or more resource record sets. If you delete a health check and you don't update the associated resource record sets, the future status of the health check can't be predicted and may change. This will affect the routing of DNS queries for your DNS failover configuration. For more information, see Replacing and Deleting Health Checks in the Amazon Route 53 Developer Guide.

If you're using Cloud Map and you configured Cloud Map to create a Route 53 health check when you register an instance, you can't use the Route 53 DeleteHealthCheck command to delete the health check. The health check is deleted automatically when you deregister the instance; there can be a delay of several hours before the health check is deleted from Route 53.

" }, "DeleteHostedZone":{ "name":"DeleteHostedZone", @@ -349,7 +351,7 @@ {"shape":"InvalidInput"}, {"shape":"InvalidDomainName"} ], - "documentation":"

Deletes a hosted zone.

If the hosted zone was created by another service, such as AWS Cloud Map, see Deleting Public Hosted Zones That Were Created by Another Service in the Amazon Route 53 Developer Guide for information about how to delete it. (The process is the same for public and private hosted zones that were created by another service.)

If you want to keep your domain registration but you want to stop routing internet traffic to your website or web application, we recommend that you delete resource record sets in the hosted zone instead of deleting the hosted zone.

If you delete a hosted zone, you can't undelete it. You must create a new hosted zone and update the name servers for your domain registration, which can require up to 48 hours to take effect. (If you delegated responsibility for a subdomain to a hosted zone and you delete the child hosted zone, you must update the name servers in the parent hosted zone.) In addition, if you delete a hosted zone, someone could hijack the domain and route traffic to their own resources using your domain name.

If you want to avoid the monthly charge for the hosted zone, you can transfer DNS service for the domain to a free DNS service. When you transfer DNS service, you have to update the name servers for the domain registration. If the domain is registered with Route 53, see UpdateDomainNameservers for information about how to replace Route 53 name servers with name servers for the new DNS service. If the domain is registered with another registrar, use the method provided by the registrar to update name servers for the domain registration. For more information, perform an internet search on \"free DNS service.\"

You can delete a hosted zone only if it contains only the default SOA record and NS resource record sets. If the hosted zone contains other resource record sets, you must delete them before you can delete the hosted zone. If you try to delete a hosted zone that contains other resource record sets, the request fails, and Route 53 returns a HostedZoneNotEmpty error. For information about deleting records from your hosted zone, see ChangeResourceRecordSets.

To verify that the hosted zone has been deleted, do one of the following:

  • Use the GetHostedZone action to request information about the hosted zone.

  • Use the ListHostedZones action to get a list of the hosted zones associated with the current AWS account.

" + "documentation":"

Deletes a hosted zone.

If the hosted zone was created by another service, such as Cloud Map, see Deleting Public Hosted Zones That Were Created by Another Service in the Amazon Route 53 Developer Guide for information about how to delete it. (The process is the same for public and private hosted zones that were created by another service.)

If you want to keep your domain registration but you want to stop routing internet traffic to your website or web application, we recommend that you delete resource record sets in the hosted zone instead of deleting the hosted zone.

If you delete a hosted zone, you can't undelete it. You must create a new hosted zone and update the name servers for your domain registration, which can require up to 48 hours to take effect. (If you delegated responsibility for a subdomain to a hosted zone and you delete the child hosted zone, you must update the name servers in the parent hosted zone.) In addition, if you delete a hosted zone, someone could hijack the domain and route traffic to their own resources using your domain name.

If you want to avoid the monthly charge for the hosted zone, you can transfer DNS service for the domain to a free DNS service. When you transfer DNS service, you have to update the name servers for the domain registration. If the domain is registered with Route 53, see UpdateDomainNameservers for information about how to replace Route 53 name servers with name servers for the new DNS service. If the domain is registered with another registrar, use the method provided by the registrar to update name servers for the domain registration. For more information, perform an internet search on \"free DNS service.\"

You can delete a hosted zone only if it contains only the default SOA record and NS resource record sets. If the hosted zone contains other resource record sets, you must delete them before you can delete the hosted zone. If you try to delete a hosted zone that contains other resource record sets, the request fails, and Route 53 returns a HostedZoneNotEmpty error. For information about deleting records from your hosted zone, see ChangeResourceRecordSets.

To verify that the hosted zone has been deleted, do one of the following:

  • Use the GetHostedZone action to request information about the hosted zone.

  • Use the ListHostedZones action to get a list of the hosted zones associated with the current account.

" }, "DeleteKeySigningKey":{ "name":"DeleteKeySigningKey", @@ -364,7 +366,8 @@ {"shape":"NoSuchKeySigningKey"}, {"shape":"InvalidKeySigningKeyStatus"}, {"shape":"InvalidSigningStatus"}, - {"shape":"InvalidKMSArn"} + {"shape":"InvalidKMSArn"}, + {"shape":"InvalidInput"} ], "documentation":"

Deletes a key-signing key (KSK). Before you can delete a KSK, you must deactivate it. The KSK must be deactivated before you can delete it regardless of whether the hosted zone is enabled for DNSSEC signing.

" }, @@ -449,7 +452,7 @@ {"shape":"InvalidVPCId"}, {"shape":"InvalidInput"} ], - "documentation":"

Removes authorization to submit an AssociateVPCWithHostedZone request to associate a specified VPC with a hosted zone that was created by a different account. You must use the account that created the hosted zone to submit a DeleteVPCAssociationAuthorization request.

Sending this request only prevents the AWS account that created the VPC from associating the VPC with the Amazon Route 53 hosted zone in the future. If the VPC is already associated with the hosted zone, DeleteVPCAssociationAuthorization won't disassociate the VPC from the hosted zone. If you want to delete an existing association, use DisassociateVPCFromHostedZone.

" + "documentation":"

Removes authorization to submit an AssociateVPCWithHostedZone request to associate a specified VPC with a hosted zone that was created by a different account. You must use the account that created the hosted zone to submit a DeleteVPCAssociationAuthorization request.

Sending this request only prevents the account that created the VPC from associating the VPC with the Amazon Route 53 hosted zone in the future. If the VPC is already associated with the hosted zone, DeleteVPCAssociationAuthorization won't disassociate the VPC from the hosted zone. If you want to delete an existing association, use DisassociateVPCFromHostedZone.

" }, "DisableHostedZoneDNSSEC":{ "name":"DisableHostedZoneDNSSEC", @@ -466,7 +469,8 @@ {"shape":"KeySigningKeyInParentDSRecord"}, {"shape":"DNSSECNotFound"}, {"shape":"InvalidKeySigningKeyStatus"}, - {"shape":"InvalidKMSArn"} + {"shape":"InvalidKMSArn"}, + {"shape":"InvalidInput"} ], "documentation":"

Disables DNSSEC signing in a specific hosted zone. This action does not deactivate any key-signing keys (KSKs) that are active in the hosted zone.

" }, @@ -489,7 +493,7 @@ {"shape":"LastVPCAssociation"}, {"shape":"InvalidInput"} ], - "documentation":"

Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon Route 53 private hosted zone. Note the following:

  • You can't disassociate the last Amazon VPC from a private hosted zone.

  • You can't convert a private hosted zone into a public hosted zone.

  • You can submit a DisassociateVPCFromHostedZone request using either the account that created the hosted zone or the account that created the Amazon VPC.

  • Some services, such as AWS Cloud Map and Amazon Elastic File System (Amazon EFS) automatically create hosted zones and associate VPCs with the hosted zones. A service can create a hosted zone using your account or using its own account. You can disassociate a VPC from a hosted zone only if the service created the hosted zone using your account.

    When you run DisassociateVPCFromHostedZone, if the hosted zone has a value for OwningAccount, you can use DisassociateVPCFromHostedZone. If the hosted zone has a value for OwningService, you can't use DisassociateVPCFromHostedZone.

" + "documentation":"

Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon Route 53 private hosted zone. Note the following:

  • You can't disassociate the last Amazon VPC from a private hosted zone.

  • You can't convert a private hosted zone into a public hosted zone.

  • You can submit a DisassociateVPCFromHostedZone request using either the account that created the hosted zone or the account that created the Amazon VPC.

  • Some services, such as Cloud Map and Amazon Elastic File System (Amazon EFS) automatically create hosted zones and associate VPCs with the hosted zones. A service can create a hosted zone using your account or using its own account. You can disassociate a VPC from a hosted zone only if the service created the hosted zone using your account.

    When you run DisassociateVPCFromHostedZone, if the hosted zone has a value for OwningAccount, you can use DisassociateVPCFromHostedZone. If the hosted zone has a value for OwningService, you can't use DisassociateVPCFromHostedZone.

" }, "EnableHostedZoneDNSSEC":{ "name":"EnableHostedZoneDNSSEC", @@ -507,7 +511,8 @@ {"shape":"InvalidKMSArn"}, {"shape":"HostedZonePartiallyDelegated"}, {"shape":"DNSSECNotFound"}, - {"shape":"InvalidKeySigningKeyStatus"} + {"shape":"InvalidKeySigningKeyStatus"}, + {"shape":"InvalidInput"} ], "documentation":"

Enables DNSSEC signing in a specific hosted zone.

" }, @@ -522,7 +527,7 @@ "errors":[ {"shape":"InvalidInput"} ], - "documentation":"

Gets the specified limit for the current account, for example, the maximum number of health checks that you can create using the account.

For the default limit, see Limits in the Amazon Route 53 Developer Guide. To request a higher limit, open a case.

You can also view account limits in AWS Trusted Advisor. Sign in to the AWS Management Console and open the Trusted Advisor console at https://console.aws.amazon.com/trustedadvisor/. Then choose Service limits in the navigation pane.

" + "documentation":"

Gets the specified limit for the current account, for example, the maximum number of health checks that you can create using the account.

For the default limit, see Limits in the Amazon Route 53 Developer Guide. To request a higher limit, open a case.

You can also view account limits in Amazon Web Services Trusted Advisor. Sign in to the Management Console and open the Trusted Advisor console at https://console.aws.amazon.com/trustedadvisor/. Then choose Service limits in the navigation pane.

" }, "GetChange":{ "name":"GetChange", @@ -546,7 +551,7 @@ }, "input":{"shape":"GetCheckerIpRangesRequest"}, "output":{"shape":"GetCheckerIpRangesResponse"}, - "documentation":"

Route 53 does not perform authorization for this API because it retrieves information that is already available to the public.

GetCheckerIpRanges still works, but we recommend that you download ip-ranges.json, which includes IP address ranges for all AWS services. For more information, see IP Address Ranges of Amazon Route 53 Servers in the Amazon Route 53 Developer Guide.

" + "documentation":"

Route 53 does not perform authorization for this API because it retrieves information that is already available to the public.

GetCheckerIpRanges still works, but we recommend that you download ip-ranges.json, which includes IP address ranges for all Amazon Web Services services. For more information, see IP Address Ranges of Amazon Route 53 Servers in the Amazon Route 53 Developer Guide.

" }, "GetDNSSEC":{ "name":"GetDNSSEC", @@ -558,7 +563,8 @@ "output":{"shape":"GetDNSSECResponse"}, "errors":[ {"shape":"NoSuchHostedZone"}, - {"shape":"InvalidArgument"} + {"shape":"InvalidArgument"}, + {"shape":"InvalidInput"} ], "documentation":"

Returns information about DNSSEC for a specific hosted zone, including the key-signing keys (KSKs) in the hosted zone.

" }, @@ -599,7 +605,7 @@ }, "input":{"shape":"GetHealthCheckCountRequest"}, "output":{"shape":"GetHealthCheckCountResponse"}, - "documentation":"

Retrieves the number of health checks that are associated with the current AWS account.

" + "documentation":"

Retrieves the number of health checks that are associated with the current account.

" }, "GetHealthCheckLastFailureReason":{ "name":"GetHealthCheckLastFailureReason", @@ -627,7 +633,7 @@ {"shape":"NoSuchHealthCheck"}, {"shape":"InvalidInput"} ], - "documentation":"

Gets status of a specified health check.

" + "documentation":"

Gets status of a specified health check.

This API is intended for use during development to diagnose behavior. It doesn’t support production use-cases with high query rates that require immediate and actionable responses.

" }, "GetHostedZone":{ "name":"GetHostedZone", @@ -654,7 +660,7 @@ "errors":[ {"shape":"InvalidInput"} ], - "documentation":"

Retrieves the number of hosted zones that are associated with the current AWS account.

" + "documentation":"

Retrieves the number of hosted zones that are associated with the current account.

" }, "GetHostedZoneLimit":{ "name":"GetHostedZoneLimit", @@ -750,7 +756,7 @@ }, "input":{"shape":"GetTrafficPolicyInstanceCountRequest"}, "output":{"shape":"GetTrafficPolicyInstanceCountResponse"}, - "documentation":"

Gets the number of traffic policy instances that are associated with the current AWS account.

" + "documentation":"

Gets the number of traffic policy instances that are associated with the current account.

" }, "ListGeoLocations":{ "name":"ListGeoLocations", @@ -777,7 +783,7 @@ {"shape":"InvalidInput"}, {"shape":"IncompatibleVersion"} ], - "documentation":"

Retrieve a list of the health checks that are associated with the current AWS account.

" + "documentation":"

Retrieve a list of the health checks that are associated with the current account.

" }, "ListHostedZones":{ "name":"ListHostedZones", @@ -792,7 +798,7 @@ {"shape":"NoSuchDelegationSet"}, {"shape":"DelegationSetNotReusable"} ], - "documentation":"

Retrieves a list of the public and private hosted zones that are associated with the current AWS account. The response includes a HostedZones child element for each hosted zone.

Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of hosted zones, you can use the maxitems parameter to list them in groups of up to 100.

" + "documentation":"

Retrieves a list of the public and private hosted zones that are associated with the current account. The response includes a HostedZones child element for each hosted zone.

Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of hosted zones, you can use the maxitems parameter to list them in groups of up to 100.

" }, "ListHostedZonesByName":{ "name":"ListHostedZonesByName", @@ -806,7 +812,7 @@ {"shape":"InvalidInput"}, {"shape":"InvalidDomainName"} ], - "documentation":"

Retrieves a list of your hosted zones in lexicographic order. The response includes a HostedZones child element for each hosted zone created by the current AWS account.

ListHostedZonesByName sorts hosted zones by name with the labels reversed. For example:

com.example.www.

Note the trailing dot, which can change the sort order in some circumstances.

If the domain name includes escape characters or Punycode, ListHostedZonesByName alphabetizes the domain name using the escaped or Punycoded value, which is the format that Amazon Route 53 saves in its database. For example, to create a hosted zone for exämple.com, you specify ex\\344mple.com for the domain name. ListHostedZonesByName alphabetizes it as:

com.ex\\344mple.

The labels are reversed and alphabetized using the escaped value. For more information about valid domain name formats, including internationalized domain names, see DNS Domain Name Format in the Amazon Route 53 Developer Guide.

Route 53 returns up to 100 items in each response. If you have a lot of hosted zones, use the MaxItems parameter to list them in groups of up to 100. The response includes values that help navigate from one group of MaxItems hosted zones to the next:

  • The DNSName and HostedZoneId elements in the response contain the values, if any, specified for the dnsname and hostedzoneid parameters in the request that produced the current response.

  • The MaxItems element in the response contains the value, if any, that you specified for the maxitems parameter in the request that produced the current response.

  • If the value of IsTruncated in the response is true, there are more hosted zones associated with the current AWS account.

    If IsTruncated is false, this response includes the last hosted zone that is associated with the current account. The NextDNSName element and NextHostedZoneId elements are omitted from the response.

  • The NextDNSName and NextHostedZoneId elements in the response contain the domain name and the hosted zone ID of the next hosted zone that is associated with the current AWS account. If you want to list more hosted zones, make another call to ListHostedZonesByName, and specify the value of NextDNSName and NextHostedZoneId in the dnsname and hostedzoneid parameters, respectively.

" + "documentation":"

Retrieves a list of your hosted zones in lexicographic order. The response includes a HostedZones child element for each hosted zone created by the current account.

ListHostedZonesByName sorts hosted zones by name with the labels reversed. For example:

com.example.www.

Note the trailing dot, which can change the sort order in some circumstances.

If the domain name includes escape characters or Punycode, ListHostedZonesByName alphabetizes the domain name using the escaped or Punycoded value, which is the format that Amazon Route 53 saves in its database. For example, to create a hosted zone for exämple.com, you specify ex\\344mple.com for the domain name. ListHostedZonesByName alphabetizes it as:

com.ex\\344mple.

The labels are reversed and alphabetized using the escaped value. For more information about valid domain name formats, including internationalized domain names, see DNS Domain Name Format in the Amazon Route 53 Developer Guide.

Route 53 returns up to 100 items in each response. If you have a lot of hosted zones, use the MaxItems parameter to list them in groups of up to 100. The response includes values that help navigate from one group of MaxItems hosted zones to the next:

  • The DNSName and HostedZoneId elements in the response contain the values, if any, specified for the dnsname and hostedzoneid parameters in the request that produced the current response.

  • The MaxItems element in the response contains the value, if any, that you specified for the maxitems parameter in the request that produced the current response.

  • If the value of IsTruncated in the response is true, there are more hosted zones associated with the current account.

    If IsTruncated is false, this response includes the last hosted zone that is associated with the current account. The NextDNSName element and NextHostedZoneId elements are omitted from the response.

  • The NextDNSName and NextHostedZoneId elements in the response contain the domain name and the hosted zone ID of the next hosted zone that is associated with the current account. If you want to list more hosted zones, make another call to ListHostedZonesByName, and specify the value of NextDNSName and NextHostedZoneId in the dnsname and hostedzoneid parameters, respectively.

" }, "ListHostedZonesByVPC":{ "name":"ListHostedZonesByVPC", @@ -820,7 +826,7 @@ {"shape":"InvalidInput"}, {"shape":"InvalidPaginationToken"} ], - "documentation":"

Lists all the private hosted zones that a specified VPC is associated with, regardless of which AWS account or AWS service owns the hosted zones. The HostedZoneOwner structure in the response contains one of the following values:

  • An OwningAccount element, which contains the account number of either the current AWS account or another AWS account. Some services, such as AWS Cloud Map, create hosted zones using the current account.

  • An OwningService element, which identifies the AWS service that created and owns the hosted zone. For example, if a hosted zone was created by Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com.

" + "documentation":"

Lists all the private hosted zones that a specified VPC is associated with, regardless of which account or Amazon Web Services service owns the hosted zones. The HostedZoneOwner structure in the response contains one of the following values:

  • An OwningAccount element, which contains the account number of either the current account or another account. Some services, such as Cloud Map, create hosted zones using the current account.

  • An OwningService element, which identifies the Amazon Web Services service that created and owns the hosted zone. For example, if a hosted zone was created by Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com.

" }, "ListQueryLoggingConfigs":{ "name":"ListQueryLoggingConfigs", @@ -835,7 +841,7 @@ {"shape":"InvalidPaginationToken"}, {"shape":"NoSuchHostedZone"} ], - "documentation":"

Lists the configurations for DNS query logging that are associated with the current AWS account or the configuration that is associated with a specified hosted zone.

For more information about DNS query logs, see CreateQueryLoggingConfig. Additional information, including the format of DNS query logs, appears in Logging DNS Queries in the Amazon Route 53 Developer Guide.

" + "documentation":"

Lists the configurations for DNS query logging that are associated with the current account or the configuration that is associated with a specified hosted zone.

For more information about DNS query logs, see CreateQueryLoggingConfig. Additional information, including the format of DNS query logs, appears in Logging DNS Queries in the Amazon Route 53 Developer Guide.

" }, "ListResourceRecordSets":{ "name":"ListResourceRecordSets", @@ -862,7 +868,7 @@ "errors":[ {"shape":"InvalidInput"} ], - "documentation":"

Retrieves a list of the reusable delegation sets that are associated with the current AWS account.

" + "documentation":"

Retrieves a list of the reusable delegation sets that are associated with the current account.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -879,7 +885,7 @@ {"shape":"PriorRequestNotComplete"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists tags for one health check or hosted zone.

For information about using tags for cost allocation, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

Lists tags for one health check or hosted zone.

For information about using tags for cost allocation, see Using Cost Allocation Tags in the Billing and Cost Management User Guide.

" }, "ListTagsForResources":{ "name":"ListTagsForResources", @@ -900,7 +906,7 @@ {"shape":"PriorRequestNotComplete"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists tags for up to 10 health checks or hosted zones.

For information about using tags for cost allocation, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

Lists tags for up to 10 health checks or hosted zones.

For information about using tags for cost allocation, see Using Cost Allocation Tags in the Billing and Cost Management User Guide.

" }, "ListTrafficPolicies":{ "name":"ListTrafficPolicies", @@ -913,7 +919,7 @@ "errors":[ {"shape":"InvalidInput"} ], - "documentation":"

Gets information about the latest version for every traffic policy that is associated with the current AWS account. Policies are listed in the order that they were created in.

For information about how deleting a traffic policy affects the response from ListTrafficPolicies, see DeleteTrafficPolicy.

" + "documentation":"

Gets information about the latest version for every traffic policy that is associated with the current account. Policies are listed in the order that they were created in.

For information about how deleting a traffic policy affects the response from ListTrafficPolicies, see DeleteTrafficPolicy.

" }, "ListTrafficPolicyInstances":{ "name":"ListTrafficPolicyInstances", @@ -927,7 +933,7 @@ {"shape":"InvalidInput"}, {"shape":"NoSuchTrafficPolicyInstance"} ], - "documentation":"

Gets information about the traffic policy instances that you created by using the current AWS account.

After you submit an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

" + "documentation":"

Gets information about the traffic policy instances that you created by using the current account.

After you submit an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

" }, "ListTrafficPolicyInstancesByHostedZone":{ "name":"ListTrafficPolicyInstancesByHostedZone", @@ -1172,18 +1178,18 @@ "members":{ "HostedZoneId":{ "shape":"ResourceId", - "documentation":"

Alias resource records sets only: The value used depends on where you want to route traffic:

Amazon API Gateway custom regional APIs and edge-optimized APIs

Specify the hosted zone ID for your API. You can get the applicable value using the AWS CLI command get-domain-names:

  • For regional APIs, specify the value of regionalHostedZoneId.

  • For edge-optimized APIs, specify the value of distributionHostedZoneId.

Amazon Virtual Private Cloud interface VPC endpoint

Specify the hosted zone ID for your interface endpoint. You can get the value of HostedZoneId using the AWS CLI command describe-vpc-endpoints.

CloudFront distribution

Specify Z2FDTNDATAQYW2.

Alias resource record sets for CloudFront can't be created in a private zone.

Elastic Beanstalk environment

Specify the hosted zone ID for the region that you created the environment in. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see AWS Elastic Beanstalk endpoints and quotas in the Amazon Web Services General Reference.

ELB load balancer

Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:

  • Elastic Load Balancing endpoints and quotas topic in the Amazon Web Services General Reference: Use the value that corresponds with the region that you created your load balancer in. Note that there are separate columns for Application and Classic Load Balancers and for Network Load Balancers.

  • AWS Management Console: Go to the Amazon EC2 page, choose Load Balancers in the navigation pane, select the load balancer, and get the value of the Hosted zone field on the Description tab.

  • Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable value. For more information, see the applicable guide:

  • AWS CLI: Use describe-load-balancers to get the applicable value. For more information, see the applicable guide:

AWS Global Accelerator accelerator

Specify Z2BJ6XQ5FK7U4H.

An Amazon S3 bucket configured as a static website

Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the table Amazon S3 Website Endpoints in the Amazon Web Services General Reference.

Another Route 53 resource record set in your hosted zone

Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)

" + "documentation":"

Alias resource records sets only: The value used depends on where you want to route traffic:

Amazon API Gateway custom regional APIs and edge-optimized APIs

Specify the hosted zone ID for your API. You can get the applicable value using the CLI command get-domain-names:

  • For regional APIs, specify the value of regionalHostedZoneId.

  • For edge-optimized APIs, specify the value of distributionHostedZoneId.

Amazon Virtual Private Cloud interface VPC endpoint

Specify the hosted zone ID for your interface endpoint. You can get the value of HostedZoneId using the CLI command describe-vpc-endpoints.

CloudFront distribution

Specify Z2FDTNDATAQYW2.

Alias resource record sets for CloudFront can't be created in a private zone.

Elastic Beanstalk environment

Specify the hosted zone ID for the region that you created the environment in. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see Elastic Beanstalk endpoints and quotas in the Amazon Web Services General Reference.

ELB load balancer

Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:

  • Elastic Load Balancing endpoints and quotas topic in the Amazon Web Services General Reference: Use the value that corresponds with the region that you created your load balancer in. Note that there are separate columns for Application and Classic Load Balancers and for Network Load Balancers.

  • Management Console: Go to the Amazon EC2 page, choose Load Balancers in the navigation pane, select the load balancer, and get the value of the Hosted zone field on the Description tab.

  • Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable value. For more information, see the applicable guide:

  • CLI: Use describe-load-balancers to get the applicable value. For more information, see the applicable guide:

Global Accelerator accelerator

Specify Z2BJ6XQ5FK7U4H.

An Amazon S3 bucket configured as a static website

Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the table Amazon S3 Website Endpoints in the Amazon Web Services General Reference.

Another Route 53 resource record set in your hosted zone

Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)

" }, "DNSName":{ "shape":"DNSName", - "documentation":"

Alias resource record sets only: The value that you specify depends on where you want to route queries:

Amazon API Gateway custom regional APIs and edge-optimized APIs

Specify the applicable domain name for your API. You can get the applicable value using the AWS CLI command get-domain-names:

  • For regional APIs, specify the value of regionalDomainName.

  • For edge-optimized APIs, specify the value of distributionDomainName. This is the name of the associated CloudFront distribution, such as da1b2c3d4e5.cloudfront.net.

The name of the record that you're creating must match a custom domain name for your API, such as api.example.com.

Amazon Virtual Private Cloud interface VPC endpoint

Enter the API endpoint for the interface endpoint, such as vpce-123456789abcdef01-example-us-east-1a.elasticloadbalancing.us-east-1.vpce.amazonaws.com. For edge-optimized APIs, this is the domain name for the corresponding CloudFront distribution. You can get the value of DnsName using the AWS CLI command describe-vpc-endpoints.

CloudFront distribution

Specify the domain name that CloudFront assigned when you created your distribution.

Your CloudFront distribution must include an alternate domain name that matches the name of the resource record set. For example, if the name of the resource record set is acme.example.com, your CloudFront distribution must include acme.example.com as one of the alternate domain names. For more information, see Using Alternate Domain Names (CNAMEs) in the Amazon CloudFront Developer Guide.

You can't create a resource record set in a private hosted zone to route traffic to a CloudFront distribution.

For failover alias records, you can't specify a CloudFront distribution for both the primary and secondary records. A distribution must include an alternate domain name that matches the name of the record. However, the primary and secondary records have the same name, and you can't include the same alternate domain name in more than one distribution.

Elastic Beanstalk environment

If the domain name for your Elastic Beanstalk environment includes the region that you deployed the environment in, you can create an alias record that routes traffic to the environment. For example, the domain name my-environment.us-west-2.elasticbeanstalk.com is a regionalized domain name.

For environments that were created before early 2016, the domain name doesn't include the region. To route traffic to these environments, you must create a CNAME record instead of an alias record. Note that you can't create a CNAME record for the root domain name. For example, if your domain name is example.com, you can create a record that routes traffic for acme.example.com to your Elastic Beanstalk environment, but you can't create a record that routes traffic for example.com to your Elastic Beanstalk environment.

For Elastic Beanstalk environments that have regionalized subdomains, specify the CNAME attribute for the environment. You can use the following methods to get the value of the CNAME attribute:

  • AWS Management Console: For information about how to get the value by using the console, see Using Custom Domains with AWS Elastic Beanstalk in the AWS Elastic Beanstalk Developer Guide.

  • Elastic Beanstalk API: Use the DescribeEnvironments action to get the value of the CNAME attribute. For more information, see DescribeEnvironments in the AWS Elastic Beanstalk API Reference.

  • AWS CLI: Use the describe-environments command to get the value of the CNAME attribute. For more information, see describe-environments in the AWS CLI Command Reference.

ELB load balancer

Specify the DNS name that is associated with the load balancer. Get the DNS name by using the AWS Management Console, the ELB API, or the AWS CLI.

  • AWS Management Console: Go to the EC2 page, choose Load Balancers in the navigation pane, choose the load balancer, choose the Description tab, and get the value of the DNS name field.

    If you're routing traffic to a Classic Load Balancer, get the value that begins with dualstack. If you're routing traffic to another type of load balancer, get the value that applies to the record type, A or AAAA.

  • Elastic Load Balancing API: Use DescribeLoadBalancers to get the value of DNSName. For more information, see the applicable guide:

  • AWS CLI: Use describe-load-balancers to get the value of DNSName. For more information, see the applicable guide:

AWS Global Accelerator accelerator

Specify the DNS name for your accelerator:

Amazon S3 bucket that is configured as a static website

Specify the domain name of the Amazon S3 website endpoint that you created the bucket in, for example, s3-website.us-east-2.amazonaws.com. For more information about valid values, see the table Amazon S3 Website Endpoints in the Amazon Web Services General Reference. For more information about using S3 buckets for websites, see Getting Started with Amazon Route 53 in the Amazon Route 53 Developer Guide.

Another Route 53 resource record set

Specify the value of the Name element for a resource record set in the current hosted zone.

If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't specify the domain name for a record for which the value of Type is CNAME. This is because the alias record must have the same type as the record that you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.

" + "documentation":"

Alias resource record sets only: The value that you specify depends on where you want to route queries:

Amazon API Gateway custom regional APIs and edge-optimized APIs

Specify the applicable domain name for your API. You can get the applicable value using the CLI command get-domain-names:

  • For regional APIs, specify the value of regionalDomainName.

  • For edge-optimized APIs, specify the value of distributionDomainName. This is the name of the associated CloudFront distribution, such as da1b2c3d4e5.cloudfront.net.

The name of the record that you're creating must match a custom domain name for your API, such as api.example.com.

Amazon Virtual Private Cloud interface VPC endpoint

Enter the API endpoint for the interface endpoint, such as vpce-123456789abcdef01-example-us-east-1a.elasticloadbalancing.us-east-1.vpce.amazonaws.com. For edge-optimized APIs, this is the domain name for the corresponding CloudFront distribution. You can get the value of DnsName using the CLI command describe-vpc-endpoints.

CloudFront distribution

Specify the domain name that CloudFront assigned when you created your distribution.

Your CloudFront distribution must include an alternate domain name that matches the name of the resource record set. For example, if the name of the resource record set is acme.example.com, your CloudFront distribution must include acme.example.com as one of the alternate domain names. For more information, see Using Alternate Domain Names (CNAMEs) in the Amazon CloudFront Developer Guide.

You can't create a resource record set in a private hosted zone to route traffic to a CloudFront distribution.

For failover alias records, you can't specify a CloudFront distribution for both the primary and secondary records. A distribution must include an alternate domain name that matches the name of the record. However, the primary and secondary records have the same name, and you can't include the same alternate domain name in more than one distribution.

Elastic Beanstalk environment

If the domain name for your Elastic Beanstalk environment includes the region that you deployed the environment in, you can create an alias record that routes traffic to the environment. For example, the domain name my-environment.us-west-2.elasticbeanstalk.com is a regionalized domain name.

For environments that were created before early 2016, the domain name doesn't include the region. To route traffic to these environments, you must create a CNAME record instead of an alias record. Note that you can't create a CNAME record for the root domain name. For example, if your domain name is example.com, you can create a record that routes traffic for acme.example.com to your Elastic Beanstalk environment, but you can't create a record that routes traffic for example.com to your Elastic Beanstalk environment.

For Elastic Beanstalk environments that have regionalized subdomains, specify the CNAME attribute for the environment. You can use the following methods to get the value of the CNAME attribute:

  • Management Console: For information about how to get the value by using the console, see Using Custom Domains with Elastic Beanstalk in the Elastic Beanstalk Developer Guide.

  • Elastic Beanstalk API: Use the DescribeEnvironments action to get the value of the CNAME attribute. For more information, see DescribeEnvironments in the Elastic Beanstalk API Reference.

  • CLI: Use the describe-environments command to get the value of the CNAME attribute. For more information, see describe-environments in the CLI Command Reference.

ELB load balancer

Specify the DNS name that is associated with the load balancer. Get the DNS name by using the Management Console, the ELB API, or the CLI.

  • Management Console: Go to the EC2 page, choose Load Balancers in the navigation pane, choose the load balancer, choose the Description tab, and get the value of the DNS name field.

    If you're routing traffic to a Classic Load Balancer, get the value that begins with dualstack. If you're routing traffic to another type of load balancer, get the value that applies to the record type, A or AAAA.

  • Elastic Load Balancing API: Use DescribeLoadBalancers to get the value of DNSName. For more information, see the applicable guide:

  • CLI: Use describe-load-balancers to get the value of DNSName. For more information, see the applicable guide:

Global Accelerator accelerator

Specify the DNS name for your accelerator:

Amazon S3 bucket that is configured as a static website

Specify the domain name of the Amazon S3 website endpoint that you created the bucket in, for example, s3-website.us-east-2.amazonaws.com. For more information about valid values, see the table Amazon S3 Website Endpoints in the Amazon Web Services General Reference. For more information about using S3 buckets for websites, see Getting Started with Amazon Route 53 in the Amazon Route 53 Developer Guide.

Another Route 53 resource record set

Specify the value of the Name element for a resource record set in the current hosted zone.

If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't specify the domain name for a record for which the value of Type is CNAME. This is because the alias record must have the same type as the record that you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.

" }, "EvaluateTargetHealth":{ "shape":"AliasHealthEnabled", - "documentation":"

Applies only to alias, failover alias, geolocation alias, latency alias, and weighted alias resource record sets: When EvaluateTargetHealth is true, an alias resource record set inherits the health of the referenced AWS resource, such as an ELB load balancer or another resource record set in the hosted zone.

Note the following:

CloudFront distributions

You can't set EvaluateTargetHealth to true when the alias target is a CloudFront distribution.

Elastic Beanstalk environments that have regionalized subdomains

If you specify an Elastic Beanstalk environment in DNSName and the environment contains an ELB load balancer, Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances that are registered with the load balancer. (An environment automatically contains an ELB load balancer if it includes more than one Amazon EC2 instance.) If you set EvaluateTargetHealth to true and either no Amazon EC2 instances are healthy or the load balancer itself is unhealthy, Route 53 routes queries to other available resources that are healthy, if any.

If the environment contains a single Amazon EC2 instance, there are no special requirements.

ELB load balancers

Health checking behavior depends on the type of load balancer:

  • Classic Load Balancers: If you specify an ELB Classic Load Balancer in DNSName, Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances that are registered with the load balancer. If you set EvaluateTargetHealth to true and either no EC2 instances are healthy or the load balancer itself is unhealthy, Route 53 routes queries to other resources.

  • Application and Network Load Balancers: If you specify an ELB Application or Network Load Balancer and you set EvaluateTargetHealth to true, Route 53 routes queries to the load balancer based on the health of the target groups that are associated with the load balancer:

    • For an Application or Network Load Balancer to be considered healthy, every target group that contains targets must contain at least one healthy target. If any target group contains only unhealthy targets, the load balancer is considered unhealthy, and Route 53 routes queries to other resources.

    • A target group that has no registered targets is considered unhealthy.

When you create a load balancer, you configure settings for Elastic Load Balancing health checks; they're not Route 53 health checks, but they perform a similar function. Do not create Route 53 health checks for the EC2 instances that you register with an ELB load balancer.

S3 buckets

There are no special requirements for setting EvaluateTargetHealth to true when the alias target is an S3 bucket.

Other records in the same hosted zone

If the AWS resource that you specify in DNSName is a record or a group of records (for example, a group of weighted records) but is not another alias record, we recommend that you associate a health check with all of the records in the alias target. For more information, see What Happens When You Omit Health Checks? in the Amazon Route 53 Developer Guide.

For more information and examples, see Amazon Route 53 Health Checks and DNS Failover in the Amazon Route 53 Developer Guide.

" + "documentation":"

Applies only to alias, failover alias, geolocation alias, latency alias, and weighted alias resource record sets: When EvaluateTargetHealth is true, an alias resource record set inherits the health of the referenced Amazon Web Services resource, such as an ELB load balancer or another resource record set in the hosted zone.

Note the following:

CloudFront distributions

You can't set EvaluateTargetHealth to true when the alias target is a CloudFront distribution.

Elastic Beanstalk environments that have regionalized subdomains

If you specify an Elastic Beanstalk environment in DNSName and the environment contains an ELB load balancer, Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances that are registered with the load balancer. (An environment automatically contains an ELB load balancer if it includes more than one Amazon EC2 instance.) If you set EvaluateTargetHealth to true and either no Amazon EC2 instances are healthy or the load balancer itself is unhealthy, Route 53 routes queries to other available resources that are healthy, if any.

If the environment contains a single Amazon EC2 instance, there are no special requirements.

ELB load balancers

Health checking behavior depends on the type of load balancer:

  • Classic Load Balancers: If you specify an ELB Classic Load Balancer in DNSName, Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances that are registered with the load balancer. If you set EvaluateTargetHealth to true and either no EC2 instances are healthy or the load balancer itself is unhealthy, Route 53 routes queries to other resources.

  • Application and Network Load Balancers: If you specify an ELB Application or Network Load Balancer and you set EvaluateTargetHealth to true, Route 53 routes queries to the load balancer based on the health of the target groups that are associated with the load balancer:

    • For an Application or Network Load Balancer to be considered healthy, every target group that contains targets must contain at least one healthy target. If any target group contains only unhealthy targets, the load balancer is considered unhealthy, and Route 53 routes queries to other resources.

    • A target group that has no registered targets is considered unhealthy.

When you create a load balancer, you configure settings for Elastic Load Balancing health checks; they're not Route 53 health checks, but they perform a similar function. Do not create Route 53 health checks for the EC2 instances that you register with an ELB load balancer.

S3 buckets

There are no special requirements for setting EvaluateTargetHealth to true when the alias target is an S3 bucket.

Other records in the same hosted zone

If the Amazon Web Services resource that you specify in DNSName is a record or a group of records (for example, a group of weighted records) but is not another alias record, we recommend that you associate a health check with all of the records in the alias target. For more information, see What Happens When You Omit Health Checks? in the Amazon Route 53 Developer Guide.

For more information and examples, see Amazon Route 53 Health Checks and DNS Failover in the Amazon Route 53 Developer Guide.

" } }, - "documentation":"

Alias resource record sets only: Information about the AWS resource, such as a CloudFront distribution or an Amazon S3 bucket, that you want to route traffic to.

When creating resource record sets for a private hosted zone, note the following:

  • Creating geolocation alias resource record sets or latency alias resource record sets in a private hosted zone is unsupported.

  • For information about creating failover resource record sets in a private hosted zone, see Configuring Failover in a Private Hosted Zone.

" + "documentation":"

Alias resource record sets only: Information about the Amazon Web Services resource, such as a CloudFront distribution or an Amazon S3 bucket, that you want to route traffic to.

When creating resource record sets for a private hosted zone, note the following:

  • Creating geolocation alias resource record sets or latency alias resource record sets in a private hosted zone is unsupported.

  • For information about creating failover resource record sets in a private hosted zone, see Configuring Failover in a Private Hosted Zone.

" }, "AssociateVPCComment":{"type":"string"}, "AssociateVPCWithHostedZoneRequest":{ @@ -1272,7 +1278,7 @@ "members":{ "Id":{ "shape":"ResourceId", - "documentation":"

The ID of the request.

" + "documentation":"

This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

" }, "Status":{ "shape":"ChangeStatus", @@ -1284,7 +1290,7 @@ }, "Comment":{ "shape":"ResourceDescription", - "documentation":"

A complex type that describes change information about changes made to your hosted zone.

This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

" + "documentation":"

A comment you can provide.

" } }, "documentation":"

A complex type that describes change information about changes made to your hosted zone.

" @@ -1628,7 +1634,7 @@ }, "KeyManagementServiceArn":{ "shape":"SigningKeyString", - "documentation":"

The Amazon resource name (ARN) for a customer managed customer master key (CMK) in AWS Key Management Service (AWS KMS). The KeyManagementServiceArn must be unique for each key-signing key (KSK) in a single hosted zone. To see an example of KeyManagementServiceArn that grants the correct permissions for DNSSEC, scroll down to Example.

You must configure the customer managed CMK as follows:

Status

Enabled

Key spec

ECC_NIST_P256

Key usage

Sign and verify

Key policy

The key policy must give permission for the following actions:

  • DescribeKey

  • GetPublicKey

  • Sign

The key policy must also include the Amazon Route 53 service in the principal for your account. Specify the following:

  • \"Service\": \"dnssec.route53.aws.amazonaws.com\"

For more information about working with a customer managed CMK in AWS KMS, see AWS Key Management Service concepts.

" + "documentation":"

The Amazon resource name (ARN) for a customer managed customer master key (CMK) in Key Management Service (KMS). The KeyManagementServiceArn must be unique for each key-signing key (KSK) in a single hosted zone. To see an example of KeyManagementServiceArn that grants the correct permissions for DNSSEC, scroll down to Example.

You must configure the customer managed CMK as follows:

Status

Enabled

Key spec

ECC_NIST_P256

Key usage

Sign and verify

Key policy

The key policy must give permission for the following actions:

  • DescribeKey

  • GetPublicKey

  • Sign

The key policy must also include the Amazon Route 53 service in the principal for your account. Specify the following:

  • \"Service\": \"dnssec-route53.amazonaws.com\"

For more information about working with a customer managed CMK in KMS, see Key Management Service concepts.

" }, "Name":{ "shape":"SigningKeyName", @@ -1674,7 +1680,7 @@ }, "CloudWatchLogsLogGroupArn":{ "shape":"CloudWatchLogsLogGroupArn", - "documentation":"

The Amazon Resource Name (ARN) for the log group that you want to Amazon Route 53 to send query logs to. This is the format of the ARN:

arn:aws:logs:region:account-id:log-group:log_group_name

To get the ARN for a log group, you can use the CloudWatch console, the DescribeLogGroups API action, the describe-log-groups command, or the applicable command in one of the AWS SDKs.

" + "documentation":"

The Amazon Resource Name (ARN) for the log group that you want Amazon Route 53 to send query logs to. This is the format of the ARN:

arn:aws:logs:region:account-id:log-group:log_group_name

To get the ARN for a log group, you can use the CloudWatch console, the DescribeLogGroups API action, the describe-log-groups command, or the applicable command in one of the Amazon Web Services SDKs.

" } } }, @@ -2215,16 +2221,16 @@ "members":{ "HostedZoneId":{ "shape":"ResourceId", - "documentation":"

When removing authorization to associate a VPC that was created by one AWS account with a hosted zone that was created with a different AWS account, the ID of the hosted zone.

", + "documentation":"

When removing authorization to associate a VPC that was created by one account with a hosted zone that was created with a different account, the ID of the hosted zone.

", "location":"uri", "locationName":"Id" }, "VPC":{ "shape":"VPC", - "documentation":"

When removing authorization to associate a VPC that was created by one AWS account with a hosted zone that was created with a different AWS account, a complex type that includes the ID and region of the VPC.

" + "documentation":"

When removing authorization to associate a VPC that was created by one account with a hosted zone that was created with a different account, a complex type that includes the ID and region of the VPC.

" } }, - "documentation":"

A complex type that contains information about the request to remove authorization to associate a VPC that was created by one AWS account with a hosted zone that was created with a different AWS account.

" + "documentation":"

A complex type that contains information about the request to remove authorization to associate a VPC that was created by one account with a hosted zone that was created with a different account.

" }, "DeleteVPCAssociationAuthorizationResponse":{ "type":"structure", @@ -2585,7 +2591,7 @@ "type":"structure", "members":{ }, - "documentation":"

A request for the number of health checks that are associated with the current AWS account.

" + "documentation":"

A request for the number of health checks that are associated with the current account.

" }, "GetHealthCheckCountResponse":{ "type":"structure", @@ -2593,7 +2599,7 @@ "members":{ "HealthCheckCount":{ "shape":"HealthCheckCount", - "documentation":"

The number of health checks associated with the current AWS account.

" + "documentation":"

The number of health checks associated with the current account.

" } }, "documentation":"

A complex type that contains the response to a GetHealthCheckCount request.

" @@ -2641,7 +2647,7 @@ "members":{ "HealthCheck":{ "shape":"HealthCheck", - "documentation":"

A complex type that contains information about one health check that is associated with the current AWS account.

" + "documentation":"

A complex type that contains information about one health check that is associated with the current account.

" } }, "documentation":"

A complex type that contains the response to a GetHealthCheck request.

" @@ -2674,7 +2680,7 @@ "type":"structure", "members":{ }, - "documentation":"

A request to retrieve a count of all the hosted zones that are associated with the current AWS account.

" + "documentation":"

A request to retrieve a count of all the hosted zones that are associated with the current account.

" }, "GetHostedZoneCountResponse":{ "type":"structure", @@ -2682,7 +2688,7 @@ "members":{ "HostedZoneCount":{ "shape":"HostedZoneCount", - "documentation":"

The total number of public and private hosted zones that are associated with the current AWS account.

" + "documentation":"

The total number of public and private hosted zones that are associated with the current account.

" } }, "documentation":"

A complex type that contains the response to a GetHostedZoneCount request.

" @@ -2849,7 +2855,7 @@ "type":"structure", "members":{ }, - "documentation":"

Request to get the number of traffic policy instances that are associated with the current AWS account.

" + "documentation":"

Request to get the number of traffic policy instances that are associated with the current account.

" }, "GetTrafficPolicyInstanceCountResponse":{ "type":"structure", @@ -2857,7 +2863,7 @@ "members":{ "TrafficPolicyInstanceCount":{ "shape":"TrafficPolicyInstanceCount", - "documentation":"

The number of traffic policy instances that are associated with the current AWS account.

" + "documentation":"

The number of traffic policy instances that are associated with the current account.

" } }, "documentation":"

A complex type that contains information about the resource record sets that Amazon Route 53 created based on a specified traffic policy.

" @@ -2953,7 +2959,7 @@ "documentation":"

A complex type that contains information about the CloudWatch alarm that Amazon Route 53 is monitoring for this health check.

" } }, - "documentation":"

A complex type that contains information about one health check that is associated with the current AWS account.

" + "documentation":"

A complex type that contains information about one health check that is associated with the current Amazon Web Services account.

" }, "HealthCheckAlreadyExists":{ "type":"structure", @@ -2981,7 +2987,7 @@ }, "Type":{ "shape":"HealthCheckType", - "documentation":"

The type of health check that you want to create, which indicates how Amazon Route 53 determines whether an endpoint is healthy.

You can't change the value of Type after you create a health check.

You can create the following types of health checks:

  • HTTP: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.

  • HTTPS: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.

    If you specify HTTPS for the value of Type, the endpoint must support TLS v1.0 or later.

  • HTTP_STR_MATCH: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and searches the first 5,120 bytes of the response body for the string that you specify in SearchString.

  • HTTPS_STR_MATCH: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and searches the first 5,120 bytes of the response body for the string that you specify in SearchString.

  • TCP: Route 53 tries to establish a TCP connection.

  • CLOUDWATCH_METRIC: The health check is associated with a CloudWatch alarm. If the state of the alarm is OK, the health check is considered healthy. If the state is ALARM, the health check is considered unhealthy. If CloudWatch doesn't have sufficient data to determine whether the state is OK or ALARM, the health check status depends on the setting for InsufficientDataHealthStatus: Healthy, Unhealthy, or LastKnownStatus.

  • CALCULATED: For health checks that monitor the status of other health checks, Route 53 adds up the number of health checks that Route 53 health checkers consider to be healthy and compares that number with the value of HealthThreshold.

For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide.

" + "documentation":"

The type of health check that you want to create, which indicates how Amazon Route 53 determines whether an endpoint is healthy.

You can't change the value of Type after you create a health check.

You can create the following types of health checks:

  • HTTP: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.

  • HTTPS: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.

    If you specify HTTPS for the value of Type, the endpoint must support TLS v1.0 or later.

  • HTTP_STR_MATCH: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and searches the first 5,120 bytes of the response body for the string that you specify in SearchString.

  • HTTPS_STR_MATCH: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and searches the first 5,120 bytes of the response body for the string that you specify in SearchString.

  • TCP: Route 53 tries to establish a TCP connection.

  • CLOUDWATCH_METRIC: The health check is associated with a CloudWatch alarm. If the state of the alarm is OK, the health check is considered healthy. If the state is ALARM, the health check is considered unhealthy. If CloudWatch doesn't have sufficient data to determine whether the state is OK or ALARM, the health check status depends on the setting for InsufficientDataHealthStatus: Healthy, Unhealthy, or LastKnownStatus.

  • CALCULATED: For health checks that monitor the status of other health checks, Route 53 adds up the number of health checks that Route 53 health checkers consider to be healthy and compares that number with the value of HealthThreshold.

  • RECOVERY_CONTROL: The health check is associated with a Route 53 Application Recovery Controller routing control. If the routing control state is ON, the health check is considered healthy. If the state is OFF, the health check is considered unhealthy.

For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide.

" }, "ResourcePath":{ "shape":"ResourcePath", @@ -3005,7 +3011,7 @@ }, "MeasureLatency":{ "shape":"MeasureLatency", - "documentation":"

Specify whether you want Amazon Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint, and to display CloudWatch latency graphs on the Health Checks page in the Route 53 console.

You can't change the value of MeasureLatency after you create a health check.

" + "documentation":"

Specify whether you want Amazon Route 53 to measure the latency between health checkers in multiple Amazon Web Services regions and your endpoint, and to display CloudWatch latency graphs on the Health Checks page in the Route 53 console.

You can't change the value of MeasureLatency after you create a health check.

" }, "Inverted":{ "shape":"Inverted", @@ -3038,6 +3044,10 @@ "InsufficientDataHealthStatus":{ "shape":"InsufficientDataHealthStatus", "documentation":"

When CloudWatch has insufficient data about the metric to determine the alarm state, the status that you want Amazon Route 53 to assign to the health check:

  • Healthy: Route 53 considers the health check to be healthy.

  • Unhealthy: Route 53 considers the health check to be unhealthy.

  • LastKnownStatus: Route 53 uses the status of the health check from the last time that CloudWatch had sufficient data to determine the alarm state. For new health checks that have no last known status, the default status for the health check is healthy.

" + }, + "RoutingControlArn":{ + "shape":"RoutingControlArn", + "documentation":"

 The Amazon Resource Name (ARN) for the Route 53 Application Recovery Controller routing control.

" } }, "documentation":"

A complex type that contains information about the health check.

" @@ -3123,7 +3133,8 @@ "HTTPS_STR_MATCH", "TCP", "CALCULATED", - "CLOUDWATCH_METRIC" + "CLOUDWATCH_METRIC", + "RECOVERY_CONTROL" ] }, "HealthCheckVersion":{ @@ -3277,11 +3288,11 @@ "members":{ "OwningAccount":{ "shape":"AWSAccountID", - "documentation":"

If the hosted zone was created by an AWS account, or was created by an AWS service that creates hosted zones using the current account, OwningAccount contains the account ID of that account. For example, when you use AWS Cloud Map to create a hosted zone, Cloud Map creates the hosted zone using the current AWS account.

" + "documentation":"

If the hosted zone was created by an account, or was created by an Amazon Web Services service that creates hosted zones using the current account, OwningAccount contains the account ID of that account. For example, when you use Cloud Map to create a hosted zone, Cloud Map creates the hosted zone using the current account.

" }, "OwningService":{ "shape":"HostedZoneOwningService", - "documentation":"

If an AWS service uses its own account to create a hosted zone and associate the specified VPC with that hosted zone, OwningService contains an abbreviation that identifies the service. For example, if Amazon Elastic File System (Amazon EFS) created a hosted zone and associated a VPC with the hosted zone, the value of OwningService is efs.amazonaws.com.

" + "documentation":"

If an Amazon Web Services service uses its own account to create a hosted zone and associate the specified VPC with that hosted zone, OwningService contains an abbreviation that identifies the service. For example, if Amazon Elastic File System (Amazon EFS) created a hosted zone and associated a VPC with the hosted zone, the value of OwningService is efs.amazonaws.com.

" } }, "documentation":"

A complex type that identifies a hosted zone that a specified Amazon VPC is associated with and the owner of the hosted zone. If there is a value for OwningAccount, there is no value for OwningService, and vice versa.

" @@ -3324,7 +3335,7 @@ }, "Owner":{ "shape":"HostedZoneOwner", - "documentation":"

The owner of a private hosted zone that the specified VPC is associated with. The owner can be either an AWS account or an AWS service.

" + "documentation":"

The owner of a private hosted zone that the specified VPC is associated with. The owner can be either an account or an Amazon Web Services service.

" } }, "documentation":"

In the response to a ListHostedZonesByVPC request, the HostedZoneSummaries element contains one HostedZoneSummary element for each hosted zone that the specified Amazon VPC is associated with. Each HostedZoneSummary element contains the hosted zone name and ID, and information about who owns the hosted zone.

" @@ -3356,7 +3367,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

Amazon Route 53 doesn't have the permissions required to create log streams and send query logs to log streams. Possible causes include the following:

  • There is no resource policy that specifies the log group ARN in the value for Resource.

  • The resource policy that includes the log group ARN in the value for Resource doesn't have the necessary permissions.

  • The resource policy hasn't finished propagating yet.

", + "documentation":"

Amazon Route 53 doesn't have the permissions required to create log streams and send query logs to log streams. Possible causes include the following:

  • There is no resource policy that specifies the log group ARN in the value for Resource.

  • The resource policy that includes the log group ARN in the value for Resource doesn't have the necessary permissions.

  • The resource policy hasn't finished propagating yet.

  • The Key Management Service (KMS) key you specified doesn’t exist or it can’t be used with the log group associated with the query log. Update or provide a resource policy to grant permissions for the KMS key.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3493,7 +3504,7 @@ }, "KmsArn":{ "shape":"SigningKeyString", - "documentation":"

The Amazon resource name (ARN) used to identify the customer managed customer master key (CMK) in AWS Key Management Service (AWS KMS). The KmsArn must be unique for each key-signing key (KSK) in a single hosted zone.

You must configure the CMK as follows:

Status

Enabled

Key spec

ECC_NIST_P256

Key usage

Sign and verify

Key policy

The key policy must give permission for the following actions:

  • DescribeKey

  • GetPublicKey

  • Sign

The key policy must also include the Amazon Route 53 service in the principal for your account. Specify the following:

  • \"Service\": \"api-service.dnssec.route53.aws.internal\"

For more information about working with the customer managed CMK in AWS KMS, see AWS Key Management Service concepts.

" + "documentation":"

The Amazon resource name (ARN) used to identify the customer managed customer master key (CMK) in Key Management Service (KMS). The KmsArn must be unique for each key-signing key (KSK) in a single hosted zone.

You must configure the CMK as follows:

Status

Enabled

Key spec

ECC_NIST_P256

Key usage

Sign and verify

Key policy

The key policy must give permission for the following actions:

  • DescribeKey

  • GetPublicKey

  • Sign

The key policy must also include the Amazon Route 53 service in the principal for your account. Specify the following:

  • \"Service\": \"dnssec-route53.amazonaws.com\"

For more information about working with the customer managed CMK in KMS, see Key Management Service concepts.

" }, "Flag":{ "shape":"SigningKeyInteger", @@ -3616,7 +3627,7 @@ "documentation":"

" } }, - "documentation":"

This operation can't be completed either because the current account has reached the limit on reusable delegation sets that it can create or because you've reached the limit on the number of Amazon VPCs that you can associate with a private hosted zone. To get the current limit on the number of reusable delegation sets, see GetAccountLimit. To get the current limit on the number of Amazon VPCs that you can associate with a private hosted zone, see GetHostedZoneLimit. To request a higher limit, create a case with the AWS Support Center.

", + "documentation":"

This operation can't be completed either because the current account has reached the limit on reusable delegation sets that it can create or because you've reached the limit on the number of Amazon VPCs that you can associate with a private hosted zone. To get the current limit on the number of reusable delegation sets, see GetAccountLimit. To get the current limit on the number of Amazon VPCs that you can associate with a private hosted zone, see GetHostedZoneLimit. To request a higher limit, create a case with the Amazon Web Services Support Center.

", "exception":true }, "LinkedService":{ @@ -3714,7 +3725,7 @@ "locationName":"maxitems" } }, - "documentation":"

A request to retrieve a list of the health checks that are associated with the current AWS account.

" + "documentation":"

A request to retrieve a list of the health checks that are associated with the current account.

" }, "ListHealthChecksResponse":{ "type":"structure", @@ -3727,7 +3738,7 @@ "members":{ "HealthChecks":{ "shape":"HealthChecks", - "documentation":"

A complex type that contains one HealthCheck element for each health check that is associated with the current AWS account.

" + "documentation":"

A complex type that contains one HealthCheck element for each health check that is associated with the current account.

" }, "Marker":{ "shape":"PageMarker", @@ -3753,7 +3764,7 @@ "members":{ "DNSName":{ "shape":"DNSName", - "documentation":"

(Optional) For your first request to ListHostedZonesByName, include the dnsname parameter only if you want to specify the name of the first hosted zone in the response. If you don't include the dnsname parameter, Amazon Route 53 returns all of the hosted zones that were created by the current AWS account, in ASCII order. For subsequent requests, include both dnsname and hostedzoneid parameters. For dnsname, specify the value of NextDNSName from the previous response.

", + "documentation":"

(Optional) For your first request to ListHostedZonesByName, include the dnsname parameter only if you want to specify the name of the first hosted zone in the response. If you don't include the dnsname parameter, Amazon Route 53 returns all of the hosted zones that were created by the current account, in ASCII order. For subsequent requests, include both dnsname and hostedzoneid parameters. For dnsname, specify the value of NextDNSName from the previous response.

", "location":"querystring", "locationName":"dnsname" }, @@ -3770,7 +3781,7 @@ "locationName":"maxitems" } }, - "documentation":"

Retrieves a list of the public and private hosted zones that are associated with the current AWS account in ASCII order by domain name.

" + "documentation":"

Retrieves a list of the public and private hosted zones that are associated with the current account in ASCII order by domain name.

" }, "ListHostedZonesByNameResponse":{ "type":"structure", @@ -3826,7 +3837,7 @@ }, "VPCRegion":{ "shape":"VPCRegion", - "documentation":"

For the Amazon VPC that you specified for VPCId, the AWS Region that you created the VPC in.

", + "documentation":"

For the Amazon VPC that you specified for VPCId, the Amazon Web Services Region that you created the VPC in.

", "location":"querystring", "locationName":"vpcregion" }, @@ -3843,7 +3854,7 @@ "locationName":"nexttoken" } }, - "documentation":"

Lists all the private hosted zones that a specified VPC is associated with, regardless of which AWS account created the hosted zones.

" + "documentation":"

Lists all the private hosted zones that a specified VPC is associated with, regardless of which account created the hosted zones.

" }, "ListHostedZonesByVPCResponse":{ "type":"structure", @@ -3862,7 +3873,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The value that you specified for NextToken in the most recent ListHostedZonesByVPC request.

" + "documentation":"

The value that you will use for NextToken in the next ListHostedZonesByVPC request.

" } } }, @@ -3888,7 +3899,7 @@ "locationName":"delegationsetid" } }, - "documentation":"

A request to retrieve a list of the public and private hosted zones that are associated with the current AWS account.

" + "documentation":"

A request to retrieve a list of the public and private hosted zones that are associated with the current account.

" }, "ListHostedZonesResponse":{ "type":"structure", @@ -3926,19 +3937,19 @@ "members":{ "HostedZoneId":{ "shape":"ResourceId", - "documentation":"

(Optional) If you want to list the query logging configuration that is associated with a hosted zone, specify the ID in HostedZoneId.

If you don't specify a hosted zone ID, ListQueryLoggingConfigs returns all of the configurations that are associated with the current AWS account.

", + "documentation":"

(Optional) If you want to list the query logging configuration that is associated with a hosted zone, specify the ID in HostedZoneId.

If you don't specify a hosted zone ID, ListQueryLoggingConfigs returns all of the configurations that are associated with the current account.

", "location":"querystring", "locationName":"hostedzoneid" }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

(Optional) If the current AWS account has more than MaxResults query logging configurations, use NextToken to get the second and subsequent pages of results.

For the first ListQueryLoggingConfigs request, omit this value.

For the second and subsequent requests, get the value of NextToken from the previous response and specify that value for NextToken in the request.

", + "documentation":"

(Optional) If the current account has more than MaxResults query logging configurations, use NextToken to get the second and subsequent pages of results.

For the first ListQueryLoggingConfigs request, omit this value.

For the second and subsequent requests, get the value of NextToken from the previous response and specify that value for NextToken in the request.

", "location":"querystring", "locationName":"nexttoken" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) The maximum number of query logging configurations that you want Amazon Route 53 to return in response to the current request. If the current AWS account has more than MaxResults configurations, use the value of NextToken in the response to get the next page of results.

If you don't specify a value for MaxResults, Route 53 returns up to 100 configurations.

", + "documentation":"

(Optional) The maximum number of query logging configurations that you want Amazon Route 53 to return in response to the current request. If the current account has more than MaxResults configurations, use the value of NextToken in the response to get the next page of results.

If you don't specify a value for MaxResults, Route 53 returns up to 100 configurations.

", "location":"querystring", "locationName":"maxresults" } @@ -3950,11 +3961,11 @@ "members":{ "QueryLoggingConfigs":{ "shape":"QueryLoggingConfigs", - "documentation":"

An array that contains one QueryLoggingConfig element for each configuration for DNS query logging that is associated with the current AWS account.

" + "documentation":"

An array that contains one QueryLoggingConfig element for each configuration for DNS query logging that is associated with the current account.

" }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If a response includes the last of the query logging configurations that are associated with the current AWS account, NextToken doesn't appear in the response.

If a response doesn't include the last of the configurations, you can get more configurations by submitting another ListQueryLoggingConfigs request. Get the value of NextToken that Amazon Route 53 returned in the previous response and include it in NextToken in the next request.

" + "documentation":"

If a response includes the last of the query logging configurations that are associated with the current account, NextToken doesn't appear in the response.

If a response doesn't include the last of the configurations, you can get more configurations by submitting another ListQueryLoggingConfigs request. Get the value of NextToken that Amazon Route 53 returned in the previous response and include it in NextToken in the next request.

" } } }, @@ -4046,7 +4057,7 @@ "locationName":"maxitems" } }, - "documentation":"

A request to get a list of the reusable delegation sets that are associated with the current AWS account.

" + "documentation":"

A request to get a list of the reusable delegation sets that are associated with the current account.

" }, "ListReusableDelegationSetsResponse":{ "type":"structure", @@ -4059,7 +4070,7 @@ "members":{ "DelegationSets":{ "shape":"DelegationSets", - "documentation":"

A complex type that contains one DelegationSet element for each reusable delegation set that was created by the current AWS account.

" + "documentation":"

A complex type that contains one DelegationSet element for each reusable delegation set that was created by the current account.

" }, "Marker":{ "shape":"PageMarker", @@ -4078,7 +4089,7 @@ "documentation":"

The value that you specified for the maxitems parameter in the call to ListReusableDelegationSets that produced the current response.

" } }, - "documentation":"

A complex type that contains information about the reusable delegation sets that are associated with the current AWS account.

" + "documentation":"

A complex type that contains information about the reusable delegation sets that are associated with the current account.

" }, "ListTagsForResourceRequest":{ "type":"structure", @@ -4160,7 +4171,7 @@ "locationName":"maxitems" } }, - "documentation":"

A complex type that contains the information about the request to list the traffic policies that are associated with the current AWS account.

" + "documentation":"

A complex type that contains the information about the request to list the traffic policies that are associated with the current account.

" }, "ListTrafficPoliciesResponse":{ "type":"structure", @@ -4173,7 +4184,7 @@ "members":{ "TrafficPolicySummaries":{ "shape":"TrafficPolicySummaries", - "documentation":"

A list that contains one TrafficPolicySummary element for each traffic policy that was created by the current AWS account.

" + "documentation":"

A list that contains one TrafficPolicySummary element for each traffic policy that was created by the current account.

" }, "IsTruncated":{ "shape":"PageTruncated", @@ -4361,7 +4372,7 @@ "locationName":"maxitems" } }, - "documentation":"

A request to get information about the traffic policy instances that you created by using the current AWS account.

" + "documentation":"

A request to get information about the traffic policy instances that you created by using the current account.

" }, "ListTrafficPolicyInstancesResponse":{ "type":"structure", @@ -4758,7 +4769,7 @@ }, "RecordDataEntry":{ "type":"string", - "documentation":"

A value that Amazon Route 53 returned for this resource record set. A RecordDataEntry element is one of the following:

  • For non-alias resource record sets, a RecordDataEntry element contains one value in the resource record set. If the resource record set contains multiple values, the response includes one RecordDataEntry element for each value.

  • For multiple resource record sets that have the same name and type, which includes weighted, latency, geolocation, and failover, a RecordDataEntry element contains the value from the appropriate resource record set based on the request.

  • For alias resource record sets that refer to AWS resources other than another resource record set, the RecordDataEntry element contains an IP address or a domain name for the AWS resource, depending on the type of resource.

  • For alias resource record sets that refer to other resource record sets, a RecordDataEntry element contains one value from the referenced resource record set. If the referenced resource record set contains multiple values, the response includes one RecordDataEntry element for each value.

", + "documentation":"

A value that Amazon Route 53 returned for this resource record set. A RecordDataEntry element is one of the following:

  • For non-alias resource record sets, a RecordDataEntry element contains one value in the resource record set. If the resource record set contains multiple values, the response includes one RecordDataEntry element for each value.

  • For multiple resource record sets that have the same name and type, which includes weighted, latency, geolocation, and failover, a RecordDataEntry element contains the value from the appropriate resource record set based on the request.

  • For alias resource record sets that refer to Amazon Web Services resources other than another resource record set, the RecordDataEntry element contains an IP address or a domain name for the Amazon Web Services resource, depending on the type of resource.

  • For alias resource record sets that refer to other resource record sets, a RecordDataEntry element contains one value from the referenced resource record set. If the referenced resource record set contains multiple values, the response includes one RecordDataEntry element for each value.

", "max":512, "min":0 }, @@ -4834,7 +4845,7 @@ }, "Region":{ "shape":"ResourceRecordSetRegion", - "documentation":"

Latency-based resource record sets only: The Amazon EC2 Region where you created the resource that this resource record set refers to. The resource typically is an AWS resource, such as an EC2 instance or an ELB load balancer, and is referred to by an IP address or a DNS domain name, depending on the record type.

Although creating latency and latency alias resource record sets in a private hosted zone is allowed, it's not supported.

When Amazon Route 53 receives a DNS query for a domain name and type for which you have created latency resource record sets, Route 53 selects the latency resource record set that has the lowest latency between the end user and the associated Amazon EC2 Region. Route 53 then returns the value that is associated with the selected resource record set.

Note the following:

  • You can only specify one ResourceRecord per latency resource record set.

  • You can only create one latency resource record set for each Amazon EC2 Region.

  • You aren't required to create latency resource record sets for all Amazon EC2 Regions. Route 53 will choose the region with the best latency from among the regions that you create latency resource record sets for.

  • You can't create non-latency resource record sets that have the same values for the Name and Type elements as latency resource record sets.

" + "documentation":"

Latency-based resource record sets only: The Amazon EC2 Region where you created the resource that this resource record set refers to. The resource typically is an Amazon Web Services resource, such as an EC2 instance or an ELB load balancer, and is referred to by an IP address or a DNS domain name, depending on the record type.

Although creating latency and latency alias resource record sets in a private hosted zone is allowed, it's not supported.

When Amazon Route 53 receives a DNS query for a domain name and type for which you have created latency resource record sets, Route 53 selects the latency resource record set that has the lowest latency between the end user and the associated Amazon EC2 Region. Route 53 then returns the value that is associated with the selected resource record set.

Note the following:

  • You can only specify one ResourceRecord per latency resource record set.

  • You can only create one latency resource record set for each Amazon EC2 Region.

  • You aren't required to create latency resource record sets for all Amazon EC2 Regions. Route 53 will choose the region with the best latency from among the regions that you create latency resource record sets for.

  • You can't create non-latency resource record sets that have the same values for the Name and Type elements as latency resource record sets.

" }, "GeoLocation":{ "shape":"GeoLocation", @@ -4858,7 +4869,7 @@ }, "AliasTarget":{ "shape":"AliasTarget", - "documentation":"

Alias resource record sets only: Information about the AWS resource, such as a CloudFront distribution or an Amazon S3 bucket, that you want to route traffic to.

If you're creating resource records sets for a private hosted zone, note the following:

  • You can't create an alias resource record set in a private hosted zone to route traffic to a CloudFront distribution.

  • Creating geolocation alias resource record sets or latency alias resource record sets in a private hosted zone is unsupported.

  • For information about creating failover resource record sets in a private hosted zone, see Configuring Failover in a Private Hosted Zone in the Amazon Route 53 Developer Guide.

" + "documentation":"

Alias resource record sets only: Information about the Amazon Web Services resource, such as a CloudFront distribution or an Amazon S3 bucket, that you want to route traffic to.

If you're creating resource records sets for a private hosted zone, note the following:

  • You can't create an alias resource record set in a private hosted zone to route traffic to a CloudFront distribution.

  • Creating geolocation alias resource record sets or latency alias resource record sets in a private hosted zone is unsupported.

  • For information about creating failover resource record sets in a private hosted zone, see Configuring Failover in a Private Hosted Zone in the Amazon Route 53 Developer Guide.

" }, "HealthCheckId":{ "shape":"HealthCheckId", @@ -4985,6 +4996,11 @@ "type":"string", "enum":["MAX_ZONES_BY_REUSABLE_DELEGATION_SET"] }, + "RoutingControlArn":{ + "type":"string", + "max":255, + "min":1 + }, "SearchString":{ "type":"string", "max":255 @@ -5143,7 +5159,7 @@ }, "ResolverIP":{ "shape":"IPAddress", - "documentation":"

If you want to simulate a request from a specific DNS resolver, specify the IP address for that resolver. If you omit this value, TestDnsAnswer uses the IP address of a DNS resolver in the AWS US East (N. Virginia) Region (us-east-1).

", + "documentation":"

If you want to simulate a request from a specific DNS resolver, specify the IP address for that resolver. If you omit this value, TestDnsAnswer uses the IP address of a DNS resolver in the Amazon Web Services US East (N. Virginia) Region (us-east-1).

", "location":"querystring", "locationName":"resolverip" }, @@ -5216,7 +5232,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

This health check can't be created because the current account has reached the limit on the number of active health checks.

For information about default limits, see Limits in the Amazon Route 53 Developer Guide.

For information about how to get the current limit for an account, see GetAccountLimit. To request a higher limit, create a case with the AWS Support Center.

You have reached the maximum number of active health checks for an AWS account. To request a higher limit, create a case with the AWS Support Center.

", + "documentation":"

This health check can't be created because the current account has reached the limit on the number of active health checks.

For information about default limits, see Limits in the Amazon Route 53 Developer Guide.

For information about how to get the current limit for an account, see GetAccountLimit. To request a higher limit, create a case with the Amazon Web Services Support Center.

You have reached the maximum number of active health checks for an account. To request a higher limit, create a case with the Amazon Web Services Support Center.

", "exception":true }, "TooManyHostedZones":{ @@ -5227,7 +5243,7 @@ "documentation":"

" } }, - "documentation":"

This operation can't be completed either because the current account has reached the limit on the number of hosted zones or because you've reached the limit on the number of hosted zones that can be associated with a reusable delegation set.

For information about default limits, see Limits in the Amazon Route 53 Developer Guide.

To get the current limit on hosted zones that can be created by an account, see GetAccountLimit.

To get the current limit on hosted zones that can be associated with a reusable delegation set, see GetReusableDelegationSetLimit.

To request a higher limit, create a case with the AWS Support Center.

", + "documentation":"

This operation can't be completed either because the current account has reached the limit on the number of hosted zones or because you've reached the limit on the number of hosted zones that can be associated with a reusable delegation set.

For information about default limits, see Limits in the Amazon Route 53 Developer Guide.

To get the current limit on hosted zones that can be created by an account, see GetAccountLimit.

To get the current limit on hosted zones that can be associated with a reusable delegation set, see GetReusableDelegationSetLimit.

To request a higher limit, create a case with the Amazon Web Services Support Center.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -5247,7 +5263,7 @@ "documentation":"

" } }, - "documentation":"

This traffic policy can't be created because the current account has reached the limit on the number of traffic policies.

For information about default limits, see Limits in the Amazon Route 53 Developer Guide.

To get the current limit for an account, see GetAccountLimit.

To request a higher limit, create a case with the AWS Support Center.

", + "documentation":"

This traffic policy can't be created because the current account has reached the limit on the number of traffic policies.

For information about default limits, see Limits in the Amazon Route 53 Developer Guide.

To get the current limit for an account, see GetAccountLimit.

To request a higher limit, create a case with the Amazon Web Services Support Center.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -5259,7 +5275,7 @@ "documentation":"

" } }, - "documentation":"

This traffic policy instance can't be created because the current account has reached the limit on the number of traffic policy instances.

For information about default limits, see Limits in the Amazon Route 53 Developer Guide.

For information about how to get the current limit for an account, see GetAccountLimit.

To request a higher limit, create a case with the AWS Support Center.

", + "documentation":"

This traffic policy instance can't be created because the current account has reached the limit on the number of traffic policy instances.

For information about default limits, see Limits in the Amazon Route 53 Developer Guide.

For information about how to get the current limit for an account, see GetAccountLimit.

To request a higher limit, create a case with the Amazon Web Services Support Center.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -5486,10 +5502,10 @@ }, "TrafficPolicyCount":{ "shape":"TrafficPolicyVersion", - "documentation":"

The number of traffic policies that are associated with the current AWS account.

" + "documentation":"

The number of traffic policies that are associated with the current account.

" } }, - "documentation":"

A complex type that contains information about the latest version of one traffic policy that is associated with the current AWS account.

" + "documentation":"

A complex type that contains information about the latest version of one traffic policy that is associated with the current account.

" }, "TrafficPolicyVersion":{ "type":"integer", diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index 48af7cb6612a..43ada70dbc09 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml new file mode 100644 index 000000000000..a83a04abe74c --- /dev/null +++ b/services/route53recoverycluster/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.17.16-SNAPSHOT + + route53recoverycluster + AWS Java SDK :: Services :: Route53 Recovery Cluster + The AWS Java SDK for Route53 Recovery Cluster module holds the client classes that are used for + communicating with Route53 Recovery Cluster. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.route53recoverycluster + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/route53recoverycluster/src/main/resources/codegen-resources/paginators-1.json b/services/route53recoverycluster/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/route53recoverycluster/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/route53recoverycluster/src/main/resources/codegen-resources/service-2.json b/services/route53recoverycluster/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..e578292554cd --- /dev/null +++ b/services/route53recoverycluster/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,308 @@ +{ + 
"version":"2.0", + "metadata":{ + "apiVersion":"2019-12-02", + "endpointPrefix":"route53-recovery-cluster", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"Route53 Recovery Cluster", + "serviceId":"Route53 Recovery Cluster", + "signatureVersion":"v4", + "signingName":"route53-recovery-cluster", + "targetPrefix":"ToggleCustomerAPI", + "uid":"route53-recovery-cluster-2019-12-02" + }, + "operations":{ + "GetRoutingControlState":{ + "name":"GetRoutingControlState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRoutingControlStateRequest"}, + "output":{"shape":"GetRoutingControlStateResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"EndpointTemporarilyUnavailableException"} + ], + "documentation":"

Get the state for a routing control. A routing control is a simple on/off switch that you can use to route traffic to cells. When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

Before you can create a routing control, you first must create a cluster to host the control. For more information, see CreateCluster. Access one of the endpoints for the cluster to get or update the routing control state to redirect traffic.

For more information about working with routing controls, see Routing control in the Route 53 Application Recovery Controller Developer Guide.

" + }, + "UpdateRoutingControlState":{ + "name":"UpdateRoutingControlState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRoutingControlStateRequest"}, + "output":{"shape":"UpdateRoutingControlStateResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"EndpointTemporarilyUnavailableException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Set the state of the routing control to reroute traffic. You can set the value to be On or Off. When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

For more information about working with routing controls, see Routing control in the Route 53 Application Recovery Controller Developer Guide.

" + }, + "UpdateRoutingControlStates":{ + "name":"UpdateRoutingControlStates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRoutingControlStatesRequest"}, + "output":{"shape":"UpdateRoutingControlStatesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"EndpointTemporarilyUnavailableException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Set multiple routing control states. You can set the value for each state to be On or Off. When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

For more information about working with routing controls, see Routing control in the Route 53 Application Recovery Controller Developer Guide.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You don't have sufficient permissions to query the routing control state.

", + "exception":true + }, + "Arn":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[A-Za-z0-9:\\/_-]*$" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"Description of the ConflictException error" + }, + "resourceId":{ + "shape":"String", + "documentation":"Identifier of the resource in use" + }, + "resourceType":{ + "shape":"String", + "documentation":"Type of the resource in use" + } + }, + "documentation":"

There was a conflict with this request. Try again.

", + "exception":true + }, + "EndpointTemporarilyUnavailableException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The cluster endpoint isn't available. Try another cluster endpoint.

", + "exception":true + }, + "GetRoutingControlStateRequest":{ + "type":"structure", + "required":["RoutingControlArn"], + "members":{ + "RoutingControlArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Number (ARN) for the routing control that you want to get the state for.

" + } + } + }, + "GetRoutingControlStateResponse":{ + "type":"structure", + "required":[ + "RoutingControlArn", + "RoutingControlState" + ], + "members":{ + "RoutingControlArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Number (ARN) of the response.

" + }, + "RoutingControlState":{ + "shape":"RoutingControlState", + "documentation":"

The state of the routing control.

" + } + } + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{"shape":"RetryAfterSeconds"} + }, + "documentation":"

There was an unexpected error during processing of the request.

", + "exception":true, + "fault":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"Hypothetical resource identifier that was not found" + }, + "resourceType":{ + "shape":"String", + "documentation":"Hypothetical resource type that was not found" + } + }, + "documentation":"

The request references a routing control that was not found.

", + "exception":true + }, + "RetryAfterSeconds":{ + "type":"integer", + "documentation":"Advice to clients on when the call can be safely retried" + }, + "RoutingControlState":{ + "type":"string", + "enum":[ + "On", + "Off" + ] + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{"shape":"RetryAfterSeconds"} + }, + "documentation":"

The request was denied because of request throttling.

", + "exception":true + }, + "UpdateRoutingControlStateEntries":{ + "type":"list", + "member":{"shape":"UpdateRoutingControlStateEntry"} + }, + "UpdateRoutingControlStateEntry":{ + "type":"structure", + "required":[ + "RoutingControlArn", + "RoutingControlState" + ], + "members":{ + "RoutingControlArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Number (ARN) for the routing control state entry.

" + }, + "RoutingControlState":{ + "shape":"RoutingControlState", + "documentation":"

The routing control state in a set of routing control state entries.

" + } + }, + "documentation":"

A routing control state.

" + }, + "UpdateRoutingControlStateRequest":{ + "type":"structure", + "required":[ + "RoutingControlArn", + "RoutingControlState" + ], + "members":{ + "RoutingControlArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Number (ARN) for the routing control that you want to update the state for.

" + }, + "RoutingControlState":{ + "shape":"RoutingControlState", + "documentation":"

The state of the routing control. You can set the value to be On or Off.

" + } + } + }, + "UpdateRoutingControlStateResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateRoutingControlStatesRequest":{ + "type":"structure", + "required":["UpdateRoutingControlStateEntries"], + "members":{ + "UpdateRoutingControlStateEntries":{ + "shape":"UpdateRoutingControlStateEntries", + "documentation":"

A set of routing control entries that you want to update.

" + } + } + }, + "UpdateRoutingControlStatesResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "reason":{"shape":"ValidationExceptionReason"}, + "fields":{"shape":"ValidationExceptionFieldList"} + }, + "documentation":"

There was a validation error on the request.

", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The field that had the validation exception.

" + }, + "message":{ + "shape":"String", + "documentation":"

Information about the validation exception.

" + } + }, + "documentation":"

There was a validation error on the request.

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"}, + "documentation":"The fields that caused the error, if applicable" + }, + "ValidationExceptionReason":{ + "type":"string", + "documentation":"Reason the request failed validation", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + } + }, + "documentation":"

Welcome to the Amazon Route 53 Application Recovery Controller API Reference Guide for Recovery Control Data Plane .

Recovery control in Route 53 Application Recovery Controller includes extremely reliable routing controls that enable you to recover applications by rerouting traffic, for example, across Availability Zones or AWS Regions. Routing controls are simple on/off switches hosted on a cluster. A cluster is a set of five redundant regional endpoints against which you can execute API calls to update or get the state of routing controls. You use routing controls to failover traffic to recover your application across Availability Zones or Regions.

This API guide includes information about how to get and update routing control states in Route 53 Application Recovery Controller.

For more information about Route 53 Application Recovery Controller, see the following:

" +} diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml new file mode 100644 index 000000000000..df7f1836369b --- /dev/null +++ b/services/route53recoverycontrolconfig/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.17.16-SNAPSHOT + + route53recoverycontrolconfig + AWS Java SDK :: Services :: Route53 Recovery Control Config + The AWS Java SDK for Route53 Recovery Control Config module holds the client classes that are used for + communicating with Route53 Recovery Control Config. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.route53recoverycontrolconfig + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/paginators-1.json b/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..d5c61731adf4 --- /dev/null +++ b/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,29 @@ +{ + "pagination" : { + "ListAssociatedRoute53HealthChecks" : { + "input_token" : "NextToken", + "output_token" : "NextToken", + "limit_key" : "MaxResults" + }, + "ListClusters" : { + "input_token" : "NextToken", + "output_token" : "NextToken", + "limit_key" : "MaxResults" + }, + "ListControlPanels" : { + "input_token" : "NextToken", + "output_token" : "NextToken", + "limit_key" : "MaxResults" + }, + "ListRoutingControls" : { + "input_token" : "NextToken", + "output_token" : "NextToken", + "limit_key" : "MaxResults" + }, + "ListSafetyRules" : { + "input_token" : "NextToken", + "output_token" : "NextToken", + "limit_key" : "MaxResults" + } + } +} \ No newline at end of file diff --git 
a/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/service-2.json b/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..b9cb25ff29ec --- /dev/null +++ b/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1892 @@ +{ + "metadata": { + "apiVersion": "2020-11-02", + "endpointPrefix": "route53-recovery-control-config", + "signingName": "route53-recovery-control-config", + "serviceFullName": "AWS Route53 Recovery Control Config", + "serviceId": "Route53 Recovery Control Config", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "route53-recovery-control-config-2020-11-02", + "signatureVersion": "v4" + }, + "operations": { + "CreateCluster": { + "name": "CreateCluster", + "http": { + "method": "POST", + "requestUri": "/cluster", + "responseCode": 200 + }, + "input": { + "shape": "CreateClusterRequest" + }, + "output": { + "shape": "CreateClusterResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

402 response

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Create a new cluster. A cluster is a set of redundant Regional endpoints against which you can run API calls to update or get the state of one or more routing controls. Each cluster has a name, status, Amazon Resource Name (ARN), and an array of the five cluster endpoints (one for each supported Amazon Web Services Region) that you can use with API calls to the Amazon Route 53 Application Recovery Controller cluster data plane.

" + }, + "CreateControlPanel": { + "name": "CreateControlPanel", + "http": { + "method": "POST", + "requestUri": "/controlpanel", + "responseCode": 200 + }, + "input": { + "shape": "CreateControlPanelRequest" + }, + "output": { + "shape": "CreateControlPanelResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

402 response

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Creates a new control panel. A control panel represents a group of routing controls that can be changed together in a single transaction. You can use a control panel to centrally view the operational status of applications across your organization, and trigger multi-app failovers in a single transaction, for example, to fail over an Availability Zone or AWS Region.

" + }, + "CreateRoutingControl": { + "name": "CreateRoutingControl", + "http": { + "method": "POST", + "requestUri": "/routingcontrol", + "responseCode": 200 + }, + "input": { + "shape": "CreateRoutingControlRequest" + }, + "output": { + "shape": "CreateRoutingControlResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

402 response

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Creates a new routing control.

A routing control has one of two states: ON and OFF. You can map the routing control state to the state of an Amazon Route 53 health check, which can be used to control traffic routing.

To get or update the routing control state, see the Recovery Cluster (data plane) API actions for Amazon Route 53 Application Recovery Controller.

" + }, + "CreateSafetyRule": { + "name": "CreateSafetyRule", + "http": { + "method": "POST", + "requestUri": "/safetyrule", + "responseCode": 200 + }, + "input": { + "shape": "CreateSafetyRuleRequest" + }, + "output": { + "shape": "CreateSafetyRuleResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + } + ], + "documentation": "

Creates a safety rule in a control panel. Safety rules let you add safeguards around enabling and disabling routing controls, to help prevent unexpected outcomes.

There are two types of safety rules: assertion rules and gating rules.

Assertion rule: An assertion rule enforces that, when a routing control state is changed, the criteria set by the rule configuration is met. Otherwise, the change to the routing control is not accepted.

Gating rule: A gating rule verifies that a set of gating controls evaluates as true, based on a rule configuration that you specify. If the gating rule evaluates to true, Amazon Route 53 Application Recovery Controller allows a set of routing control state changes to run and complete against the set of target controls.

" + }, + "DeleteCluster": { + "name": "DeleteCluster", + "http": { + "method": "DELETE", + "requestUri": "/cluster/{ClusterArn}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteClusterRequest" + }, + "output": { + "shape": "DeleteClusterResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Delete a cluster.

" + }, + "DeleteControlPanel": { + "name": "DeleteControlPanel", + "http": { + "method": "DELETE", + "requestUri": "/controlpanel/{ControlPanelArn}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteControlPanelRequest" + }, + "output": { + "shape": "DeleteControlPanelResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Deletes a control panel.

" + }, + "DeleteRoutingControl": { + "name": "DeleteRoutingControl", + "http": { + "method": "DELETE", + "requestUri": "/routingcontrol/{RoutingControlArn}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteRoutingControlRequest" + }, + "output": { + "shape": "DeleteRoutingControlResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Deletes a routing control.

" + }, + "DeleteSafetyRule": { + "name": "DeleteSafetyRule", + "http": { + "method": "DELETE", + "requestUri": "/safetyrule/{SafetyRuleArn}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteSafetyRuleRequest" + }, + "output": { + "shape": "DeleteSafetyRuleResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + } + ], + "documentation": "

Deletes a safety rule.

/>" + }, + "DescribeCluster": { + "name": "DescribeCluster", + "http": { + "method": "GET", + "requestUri": "/cluster/{ClusterArn}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeClusterRequest" + }, + "output": { + "shape": "DescribeClusterResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Display the details about a cluster. The response includes the cluster name, endpoints, status, and Amazon Resource Name (ARN).

" + }, + "DescribeControlPanel": { + "name": "DescribeControlPanel", + "http": { + "method": "GET", + "requestUri": "/controlpanel/{ControlPanelArn}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeControlPanelRequest" + }, + "output": { + "shape": "DescribeControlPanelResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Displays details about a control panel.

" + }, + "DescribeRoutingControl": { + "name": "DescribeRoutingControl", + "http": { + "method": "GET", + "requestUri": "/routingcontrol/{RoutingControlArn}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeRoutingControlRequest" + }, + "output": { + "shape": "DescribeRoutingControlResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Displays details about a routing control. A routing control has one of two states: ON and OFF. You can map the routing control state to the state of an Amazon Route 53 health check, which can be used to control routing.

To get or update the routing control state, see the Recovery Cluster (data plane) API actions for Amazon Route 53 Application Recovery Controller.

" + }, + "DescribeSafetyRule": { + "name": "DescribeSafetyRule", + "http": { + "method": "GET", + "requestUri": "/safetyrule/{SafetyRuleArn}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeSafetyRuleRequest" + }, + "output": { + "shape": "DescribeSafetyRuleResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + } + ], + "documentation": "

Describes the safety rules (that is, the assertion rules and gating rules) for the routing controls in a control panel.

" + }, + "ListAssociatedRoute53HealthChecks": { + "name": "ListAssociatedRoute53HealthChecks", + "http": { + "method": "GET", + "requestUri": "/routingcontrol/{RoutingControlArn}/associatedRoute53HealthChecks", + "responseCode": 200 + }, + "input": { + "shape": "ListAssociatedRoute53HealthChecksRequest" + }, + "output": { + "shape": "ListAssociatedRoute53HealthChecksResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + } + ], + "documentation": "

Returns an array of all Amazon Route 53 health checks associated with a specific routing control.

" + }, + "ListClusters": { + "name": "ListClusters", + "http": { + "method": "GET", + "requestUri": "/cluster", + "responseCode": 200 + }, + "input": { + "shape": "ListClustersRequest" + }, + "output": { + "shape": "ListClustersResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string, an input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + } + ], + "documentation": "

Returns an array of all the clusters in an account.

" + }, + "ListControlPanels": { + "name": "ListControlPanels", + "http": { + "method": "GET", + "requestUri": "/controlpanels", + "responseCode": 200 + }, + "input": { + "shape": "ListControlPanelsRequest" + }, + "output": { + "shape": "ListControlPanelsResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string, an input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + } + ], + "documentation": "

Returns an array of control panels for a cluster.

" + }, + "ListRoutingControls": { + "name": "ListRoutingControls", + "http": { + "method": "GET", + "requestUri": "/controlpanel/{ControlPanelArn}/routingcontrols", + "responseCode": 200 + }, + "input": { + "shape": "ListRoutingControlsRequest" + }, + "output": { + "shape": "ListRoutingControlsResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string, an input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + } + ], + "documentation": "

Returns an array of routing controls for a control panel. A routing control is an Amazon Route 53 Application Recovery Controller construct that has one of two states: ON and OFF. You can map the routing control state to the state of an Amazon Route 53 health check, which can be used to control routing.

" + }, + "ListSafetyRules": { + "name": "ListSafetyRules", + "http": { + "method": "GET", + "requestUri": "/controlpanel/{ControlPanelArn}/safetyrules", + "responseCode": 200 + }, + "input": { + "shape": "ListSafetyRulesRequest" + }, + "output": { + "shape": "ListSafetyRulesResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string, an input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + } + ], + "documentation": "

List the safety rules (the assertion rules and gating rules) that you've defined for the routing controls in a control panel.

" + }, + "UpdateControlPanel": { + "name": "UpdateControlPanel", + "http": { + "method": "PUT", + "requestUri": "/controlpanel", + "responseCode": 200 + }, + "input": { + "shape": "UpdateControlPanelRequest" + }, + "output": { + "shape": "UpdateControlPanelResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string, an input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Updates a control panel. The only update you can make to a control panel is to change the name of the control panel.

" + }, + "UpdateRoutingControl": { + "name": "UpdateRoutingControl", + "http": { + "method": "PUT", + "requestUri": "/routingcontrol", + "responseCode": 200 + }, + "input": { + "shape": "UpdateRoutingControlRequest" + }, + "output": { + "shape": "UpdateRoutingControlResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string, an input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response - LimitExceededException or TooManyRequestsException.

" + }, + { + "shape": "ConflictException", + "documentation": "

409 response - ConflictException. You might be using a predefined variable.

" + } + ], + "documentation": "

Updates a routing control. You can only update the name of the routing control. To get or update the routing control state, see the Recovery Cluster (data plane) API actions for Amazon Route 53 Application Recovery Controller.

" + }, + "UpdateSafetyRule": { + "name": "UpdateSafetyRule", + "http": { + "method": "PUT", + "requestUri": "/safetyrule", + "responseCode": 200 + }, + "input": { + "shape": "UpdateSafetyRuleRequest" + }, + "output": { + "shape": "UpdateSafetyRuleResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string, an input parameter might be out of range, or you used parameters together incorrectly.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + } + ], + "documentation": "

Update a safety rule (an assertion rule or gating rule) for the routing controls in a control panel. You can only update the name and the waiting period for a safety rule. To make other updates, delete the safety rule and create a new safety rule.

" + } + }, + "shapes": { + "AccessDeniedException": { + "type": "structure", + "documentation": "

403 response - AccessDeniedException. You do not have sufficient access to perform this action.

", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 403 + } + }, + "AssertionRule": { + "type": "structure", + "members": { + "AssertedControls": { + "shape": "__listOf__string", + "documentation": "

The routing controls that are part of transactions that are evaluated to determine if a request to change a routing control state is allowed. For example, you might include three routing controls, one for each of three Amazon Web Services Regions.

" + }, + "ControlPanelArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the control panel.

" + }, + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

Name of the assertion rule. You can use any non-white space character in the name.

" + }, + "RuleConfig": { + "shape": "RuleConfig", + "documentation": "

The criteria that you set for specific assertion controls (routing controls) that designate how many controls must be enabled as the result of a transaction. For example, if you have three assertion controls, you might specify ATLEAST 2 for your rule configuration. This means that at least two assertion controls must be enabled, so that at least two Amazon Web Services Regions are enabled.

" + }, + "SafetyRuleArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the assertion rule.

" + }, + "Status": { + "shape": "Status", + "documentation": "

The deployment status of an assertion rule. Status can be one of the following: PENDING, DEPLOYED, PENDING_DELETION.

" + }, + "WaitPeriodMs": { + "shape": "__integer", + "documentation": "

An evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. This helps prevent \"flapping\" of state. The wait period is 5000 ms by default, but you can choose a custom value.

" + } + }, + "documentation": "

An assertion rule enforces that, when a routing control state is changed, the criteria set by the rule configuration is met. Otherwise, the change to the routing control is not accepted.

", + "required": [ + "Status", + "ControlPanelArn", + "SafetyRuleArn", + "AssertedControls", + "RuleConfig", + "WaitPeriodMs", + "Name" + ] + }, + "AssertionRuleUpdate": { + "type": "structure", + "members": { + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the assertion rule. You can use any non-white space character in the name.

" + }, + "SafetyRuleArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the assertion rule.

" + }, + "WaitPeriodMs": { + "shape": "__integer", + "documentation": "

An evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. This helps prevent \"flapping\" of state. The wait period is 5000 ms by default, but you can choose a custom value.

" + } + }, + "documentation": "

An update to an assertion rule. You can update the name or the evaluation period (wait period). If you don't specify one of the items to update, the item is unchanged.

", + "required": [ + "SafetyRuleArn", + "WaitPeriodMs", + "Name" + ] + }, + "Cluster": { + "type": "structure", + "documentation": "

A cluster is a set of five consensus-forming Regional endpoints that represent the infrastructure that hosts your routing controls. Typically, you host together on one cluster all of the routing controls for your applications.

", + "members": { + "ClusterArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the cluster.

" + }, + "ClusterEndpoints": { + "shape": "__listOfClusterEndpoint", + "documentation": "

Endpoints for a cluster. Specify one of these endpoints when you want to set or retrieve a routing control state in the cluster.

To get or update the routing control state, see the Amazon Route 53 Application Recovery Controller Cluster (Data Plane) Actions.

" + }, + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the cluster.

" + }, + "Status": { + "shape": "Status", + "documentation": "

Deployment status of a resource. Status can be one of the following: PENDING, DEPLOYED, PENDING_DELETION.

" + } + } + }, + "ClusterEndpoint": { + "type": "structure", + "members": { + "Endpoint": { + "shape": "__stringMin1Max128", + "documentation": "

A cluster endpoint. Specify an endpoint and Amazon Web Services Region when you want to set or retrieve a routing control state in the cluster.

To get or update the routing control state, see the Amazon Route 53 Application Recovery Controller Cluster (Data Plane) Actions.

" + }, + "Region": { + "shape": "__stringMin1Max32", + "documentation": "

The Amazon Web Services Region for a cluster endpoint.

" + } + }, + "documentation": "

A cluster endpoint. Specify an endpoint when you want to set or retrieve a routing control state in the cluster.

" + }, + "ConflictException": { + "type": "structure", + "documentation": "

409 response - ConflictException.

", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 409 + } + }, + "ControlPanel": { + "type": "structure", + "members": { + "ClusterArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the cluster that includes the control panel.

" + }, + "ControlPanelArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the control panel.

" + }, + "DefaultControlPanel": { + "shape": "__boolean", + "documentation": "

A flag that Amazon Route 53 Application Recovery Controller sets to true to designate the default control panel for a cluster. When you create a cluster, Amazon Route 53 Application Recovery Controller creates a control panel, and sets this flag for that control panel. If you create a control panel yourself, this flag is set to false.

" + }, + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the control panel. You can use any non-white space character in the name.

" + }, + "RoutingControlCount": { + "shape": "__integer", + "documentation": "

The number of routing controls in the control panel.

" + }, + "Status": { + "shape": "Status", + "documentation": "

The deployment status of control panel. Status can be one of the following: PENDING, DEPLOYED, PENDING_DELETION.

" + } + }, + "documentation": "

A control panel represents a group of routing controls that can be changed together in a single transaction.

" + }, + "CreateClusterRequest": { + "type": "structure", + "members": { + "ClientToken": { + "shape": "__stringMax64", + "idempotencyToken": true, + "documentation": "

Unique client idempotency token.

" + }, + "ClusterName": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the cluster.

" + } + }, + "documentation": "

Creates a cluster.

", + "required": [ + "ClusterName" + ] + }, + "CreateClusterResponse": { + "type": "structure", + "members": { + "Cluster": { + "shape": "Cluster", + "documentation": "

The cluster that was created.

" + } + } + }, + "CreateControlPanelRequest": { + "type": "structure", + "members": { + "ClientToken": { + "shape": "__stringMax64", + "documentation": "

Unique client idempotency token.

", + "idempotencyToken": true + }, + "ClusterArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the cluster for the control panel.

" + }, + "ControlPanelName": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the control panel.

" + } + }, + "documentation": "

The details of the control panel that you're creating.

", + "required": [ + "ClusterArn", + "ControlPanelName" + ] + }, + "CreateControlPanelResponse": { + "type": "structure", + "members": { + "ControlPanel": { + "shape": "ControlPanel", + "documentation": "

Information about a control panel.

" + } + } + }, + "CreateRoutingControlRequest": { + "type": "structure", + "members": { + "ClientToken": { + "shape": "__stringMax64", + "documentation": "

Unique client idempotency token.

", + "idempotencyToken": true + }, + "ClusterArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the cluster that includes the routing control.

" + }, + "ControlPanelArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the control panel that includes the routing control.

" + }, + "RoutingControlName": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the routing control.

" + } + }, + "documentation": "

The details of the routing control that you're creating.

", + "required": [ + "ClusterArn", + "RoutingControlName" + ] + }, + "CreateRoutingControlResponse": { + "type": "structure", + "members": { + "RoutingControl": { + "shape": "RoutingControl", + "documentation": "

The routing control that is created.

" + } + } + }, + "CreateSafetyRuleRequest": { + "type": "structure", + "members": { + "AssertionRule": { + "shape": "NewAssertionRule" + }, + "ClientToken": { + "shape": "__stringMax64", + "documentation": "

Unique client idempotency token.

", + "idempotencyToken": true + }, + "GatingRule": { + "shape": "NewGatingRule" + } + }, + "documentation": "

The request body that you include when you create a safety rule.

" + }, + "CreateSafetyRuleResponse": { + "type": "structure", + "members": { + "AssertionRule": { + "shape": "AssertionRule" + }, + "GatingRule": { + "shape": "GatingRule" + } + } + }, + "DeleteClusterRequest": { + "type": "structure", + "members": { + "ClusterArn": { + "shape": "__string", + "location": "uri", + "locationName": "ClusterArn", + "documentation": "

The Amazon Resource Name (ARN) of the cluster that you're deleting.

" + } + }, + "required": [ + "ClusterArn" + ] + }, + "DeleteClusterResponse": { + "type": "structure", + "members": {} + }, + "DeleteControlPanelRequest": { + "type": "structure", + "members": { + "ControlPanelArn": { + "shape": "__string", + "location": "uri", + "locationName": "ControlPanelArn", + "documentation": "

The Amazon Resource Name (ARN) of the control panel that you're deleting.

" + } + }, + "required": [ + "ControlPanelArn" + ] + }, + "DeleteControlPanelResponse": { + "type": "structure", + "members": {} + }, + "DeleteRoutingControlRequest": { + "type": "structure", + "members": { + "RoutingControlArn": { + "shape": "__string", + "location": "uri", + "locationName": "RoutingControlArn", + "documentation": "

The Amazon Resource Name (ARN) of the routing control that you're deleting.

" + } + }, + "required": [ + "RoutingControlArn" + ] + }, + "DeleteRoutingControlResponse": { + "type": "structure", + "members": {} + }, + "DeleteSafetyRuleRequest": { + "type": "structure", + "members": { + "SafetyRuleArn": { + "shape": "__string", + "location": "uri", + "locationName": "SafetyRuleArn", + "documentation": "

The Amazon Resource Name (ARN) of the safety rule that you're deleting.

" + } + }, + "required": [ + "SafetyRuleArn" + ] + }, + "DeleteSafetyRuleResponse": { + "type": "structure", + "members": {} + }, + "DescribeClusterRequest": { + "type": "structure", + "members": { + "ClusterArn": { + "shape": "__string", + "location": "uri", + "locationName": "ClusterArn", + "documentation": "

The Amazon Resource Name (ARN) of the cluster that you're getting details for.

" + } + }, + "required": [ + "ClusterArn" + ] + }, + "DescribeClusterResponse": { + "type": "structure", + "members": { + "Cluster": { + "shape": "Cluster", + "documentation": "

The cluster for the DescribeCluster request.

" + } + } + }, + "DescribeControlPanelRequest": { + "type": "structure", + "members": { + "ControlPanelArn": { + "shape": "__string", + "location": "uri", + "locationName": "ControlPanelArn", + "documentation": "

The Amazon Resource Name (ARN) of the control panel that you're getting details for.

" + } + }, + "required": [ + "ControlPanelArn" + ] + }, + "DescribeControlPanelResponse": { + "type": "structure", + "members": { + "ControlPanel": { + "shape": "ControlPanel", + "documentation": "

Information about the control panel.

" + } + } + }, + "DescribeRoutingControlRequest": { + "type": "structure", + "members": { + "RoutingControlArn": { + "shape": "__string", + "location": "uri", + "locationName": "RoutingControlArn", + "documentation": "

The Amazon Resource Name (ARN) of the routing control that you're getting details for.

" + } + }, + "required": [ + "RoutingControlArn" + ] + }, + "DescribeRoutingControlResponse": { + "type": "structure", + "members": { + "RoutingControl": { + "shape": "RoutingControl", + "documentation": "

Information about the routing control.

" + } + } + }, + "DescribeSafetyRuleRequest": { + "type": "structure", + "members": { + "SafetyRuleArn": { + "shape": "__string", + "location": "uri", + "locationName": "SafetyRuleArn", + "documentation": "

The Amazon Resource Name (ARN) of the safety rule that you're getting details for.

" + } + }, + "required": [ + "SafetyRuleArn" + ] + }, + "DescribeSafetyRuleResponse": { + "type": "structure", + "members": { + "AssertionRule": { + "shape": "AssertionRule" + }, + "GatingRule": { + "shape": "GatingRule" + } + } + }, + "GatingRule": { + "type": "structure", + "members": { + "ControlPanelArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the control panel.

" + }, + "GatingControls": { + "shape": "__listOf__string", + "documentation": "

The gating controls for the gating rule. That is, routing controls that are evaluated by the rule configuration that you specify.

" + }, + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name for the gating rule.

" + }, + "RuleConfig": { + "shape": "RuleConfig", + "documentation": "

The criteria that you set for specific gating controls (routing controls) that designates how many controls must be enabled to allow you to change (set or unset) the target controls.

" + }, + "SafetyRuleArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the gating rule.

" + }, + "Status": { + "shape": "Status", + "documentation": "

The deployment status of a gating rule. Status can be one of the following: PENDING, DEPLOYED, PENDING_DELETION.

" + }, + "TargetControls": { + "shape": "__listOf__string", + "documentation": "

Routing controls that can only be set or unset if the specified RuleConfig evaluates to true for the specified GatingControls. For example, say you have three gating controls, one for each of three Amazon Web Services Regions. Now you specify ATLEAST 2 as your RuleConfig. With these settings, you can only change (set or unset) the routing controls that you have specified as TargetControls if that rule evaluates to true.

In other words, your ability to change the routing controls that you have specified as TargetControls is gated by the rule that you set for the routing controls in GatingControls.

" + }, + "WaitPeriodMs": { + "shape": "__integer", + "documentation": "

An evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. This helps prevent \"flapping\" of state. The wait period is 5000 ms by default, but you can choose a custom value.

" + } + }, + "documentation": "

A gating rule verifies that a set of gating controls evaluates as true, based on a rule configuration that you specify. If the gating rule evaluates to true, Amazon Route 53 Application Recovery Controller allows a set of routing control state changes to run and complete against the set of target controls.

", + "required": [ + "Status", + "TargetControls", + "ControlPanelArn", + "SafetyRuleArn", + "GatingControls", + "RuleConfig", + "WaitPeriodMs", + "Name" + ] + }, + "GatingRuleUpdate": { + "type": "structure", + "members": { + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name for the gating rule.

" + }, + "SafetyRuleArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the gating rule.

" + }, + "WaitPeriodMs": { + "shape": "__integer", + "documentation": "

An evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. This helps prevent \"flapping\" of state. The wait period is 5000 ms by default, but you can choose a custom value.

" + } + }, + "documentation": "

Update to a gating rule. You can update the name or the evaluation period (wait period). If you don't specify one of the items to update, the item is unchanged.

", + "required": [ + "SafetyRuleArn", + "WaitPeriodMs", + "Name" + ] + }, + "InternalServerException": { + "type": "structure", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 500 + } + }, + "ListAssociatedRoute53HealthChecksRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "MaxResults", + "documentation": "

The number of objects that you want to return with this call.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "NextToken", + "documentation": "

The token that identifies which batch of results you want to see.

" + }, + "RoutingControlArn": { + "shape": "__string", + "location": "uri", + "locationName": "RoutingControlArn", + "documentation": "

The Amazon Resource Name (ARN) of the routing control that you're getting details for.

" + } + }, + "required": [ + "RoutingControlArn" + ] + }, + "ListAssociatedRoute53HealthChecksResponse": { + "type": "structure", + "members": { + "HealthCheckIds": { + "shape": "__listOf__string", + "documentation": "

Identifiers for the health checks.

" + }, + "NextToken": { + "shape": "__stringMax8096", + "documentation": "

The token that identifies which batch of results you want to see.

" + } + } + }, + "ListClustersRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "MaxResults", + "documentation": "

The number of objects that you want to return with this call.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "NextToken", + "documentation": "

The token that identifies which batch of results you want to see.

" + } + } + }, + "ListClustersResponse": { + "type": "structure", + "members": { + "Clusters": { + "shape": "__listOfCluster", + "documentation": "

An array of the clusters in an account.

" + }, + "NextToken": { + "shape": "__stringMax8096", + "documentation": "

The token that identifies which batch of results you want to see.

" + } + } + }, + "ListControlPanelsRequest": { + "type": "structure", + "members": { + "ClusterArn": { + "shape": "__string", + "location": "querystring", + "locationName": "ClusterArn", + "documentation": "

The Amazon Resource Name (ARN) of a cluster.

" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "MaxResults", + "documentation": "

The number of objects that you want to return with this call.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "NextToken", + "documentation": "

The token that identifies which batch of results you want to see.

" + } + } + }, + "ListControlPanelsResponse": { + "type": "structure", + "members": { + "ControlPanels": { + "shape": "__listOfControlPanel", + "documentation": "

The result of a successful ListControlPanel request.

" + }, + "NextToken": { + "shape": "__stringMax8096", + "documentation": "

The token that identifies which batch of results you want to see.

" + } + } + }, + "ListRoutingControlsRequest": { + "type": "structure", + "members": { + "ControlPanelArn": { + "shape": "__string", + "location": "uri", + "locationName": "ControlPanelArn", + "documentation": "

The Amazon Resource Name (ARN) of the control panel that you're getting routing control details for.

" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "MaxResults", + "documentation": "

The number of objects that you want to return with this call.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "NextToken", + "documentation": "

The token that identifies which batch of results you want to see.

" + } + }, + "required": [ + "ControlPanelArn" + ] + }, + "ListRoutingControlsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__stringMax8096", + "documentation": "

The token that identifies which batch of results you want to see.

" + }, + "RoutingControls": { + "shape": "__listOfRoutingControl", + "documentation": "

An array of routing controls.

" + } + } + }, + "ListSafetyRulesRequest": { + "type": "structure", + "members": { + "ControlPanelArn": { + "shape": "__string", + "location": "uri", + "locationName": "ControlPanelArn", + "documentation": "

The Amazon Resource Name (ARN) of the control panel that you're getting details for.

" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "MaxResults", + "documentation": "

The number of objects that you want to return with this call.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "NextToken", + "documentation": "

The token that identifies which batch of results you want to see.

" + } + }, + "required": [ + "ControlPanelArn" + ] + }, + "ListSafetyRulesResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__stringMax8096", + "documentation": "

The token that identifies which batch of results you want to see.

" + }, + "SafetyRules": { + "shape": "__listOfRule", + "documentation": "

The list of safety rules in a control panel.

" + } + } + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 1000 + }, + "NewAssertionRule": { + "type": "structure", + "members": { + "AssertedControls": { + "shape": "__listOf__string", + "documentation": "

The routing controls that are part of transactions that are evaluated to determine if a request to change a routing control state is allowed. For example, you might include three routing controls, one for each of three Amazon Web Services Regions.

" + }, + "ControlPanelArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) for the control panel.

" + }, + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the assertion rule. You can use any non-white space character in the name.

" + }, + "RuleConfig": { + "shape": "RuleConfig", + "documentation": "

The criteria that you set for specific assertion controls (routing controls) that designate how many controls must be enabled as the result of a transaction. For example, if you have three assertion controls, you might specify ATLEAST 2 for your rule configuration. This means that at least two assertion controls must be enabled, so that at least two Amazon Web Services Regions are enabled.

" + }, + "WaitPeriodMs": { + "shape": "__integer", + "documentation": "

An evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. This helps prevent \"flapping\" of state. The wait period is 5000 ms by default, but you can choose a custom value.

" + } + }, + "documentation": "

A new assertion rule for a control panel.

", + "required": [ + "ControlPanelArn", + "AssertedControls", + "RuleConfig", + "WaitPeriodMs", + "Name" + ] + }, + "NewGatingRule": { + "type": "structure", + "members": { + "ControlPanelArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the control panel.

" + }, + "GatingControls": { + "shape": "__listOf__string", + "documentation": "

The gating controls for the new gating rule. That is, routing controls that are evaluated by the rule configuration that you specify.

" + }, + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name for the new gating rule.

" + }, + "RuleConfig": { + "shape": "RuleConfig", + "documentation": "

The criteria that you set for specific gating controls (routing controls) that designates how many controls must be enabled to allow you to change (set or unset) the target controls.

" + }, + "TargetControls": { + "shape": "__listOf__string", + "documentation": "

Routing controls that can only be set or unset if the specified RuleConfig evaluates to true for the specified GatingControls. For example, say you have three gating controls, one for each of three Amazon Web Services Regions. Now you specify AtLeast 2 as your RuleConfig. With these settings, you can only change (set or unset) the routing controls that you have specified as TargetControls if that rule evaluates to true.

In other words, your ability to change the routing controls that you have specified as TargetControls is gated by the rule that you set for the routing controls in GatingControls.

" + }, + "WaitPeriodMs": { + "shape": "__integer", + "documentation": "

An evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. This helps prevent \"flapping\" of state. The wait period is 5000 ms by default, but you can choose a custom value.

" + } + }, + "documentation": "

A new gating rule for a control panel.

", + "required": [ + "TargetControls", + "ControlPanelArn", + "GatingControls", + "RuleConfig", + "WaitPeriodMs", + "Name" + ] + }, + "ResourceNotFoundException": { + "type": "structure", + "documentation": "

404 response - The query string contains a syntax error or resource not found.

", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "RoutingControl": { + "type": "structure", + "members": { + "ControlPanelArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the control panel that includes the routing control.

" + }, + "Name": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the routing control.

" + }, + "RoutingControlArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the routing control.

" + }, + "Status": { + "shape": "Status", + "documentation": "

The deployment status of a routing control. Status can be one of the following: PENDING, DEPLOYED, PENDING_DELETION.

" + } + }, + "documentation": "

A routing control has one of two states: ON and OFF. You can map the routing control state to the state of an Amazon Route 53 health check, which can be used to control traffic routing.

" + }, + "Rule": { + "type": "structure", + "members": { + "ASSERTION": { + "shape": "AssertionRule", + "documentation": "

An assertion rule enforces that, when a routing control state is changed, the criteria set by the rule configuration is met. Otherwise, the change to the routing control is not accepted.

" + }, + "GATING": { + "shape": "GatingRule", + "documentation": "

A gating rule verifies that a set of gating controls evaluates as true, based on a rule configuration that you specify. If the gating rule evaluates to true, Amazon Route 53 Application Recovery Controller allows a set of routing control state changes to run and complete against the set of target controls.

" + } + }, + "documentation": "

A safety rule. A safety rule can be an assertion rule or a gating rule.

" + }, + "RuleConfig": { + "type": "structure", + "members": { + "Inverted": { + "shape": "__boolean", + "documentation": "

Logical negation of the rule. If the rule would usually evaluate true, it's evaluated as false, and vice versa.

" + }, + "Threshold": { + "shape": "__integer", + "documentation": "

The value of N, when you specify an ATLEAST rule type. That is, Threshold is the number of controls that must be set when you specify an ATLEAST type.

" + }, + "Type": { + "shape": "RuleType", + "documentation": "

A rule can be one of the following: ATLEAST, AND, or OR.

" + } + }, + "documentation": "

The rule configuration for an assertion rule. That is, the criteria that you set for specific assertion controls (routing controls) that specify how many controls must be enabled after a transaction completes.

", + "required": [ + "Type", + "Inverted", + "Threshold" + ] + }, + "RuleType": { + "type": "string", + "documentation": "

An enumerated type that determines how the evaluated rules are processed. RuleType can be one of the following:

ATLEAST - At least N routing controls must be set. You specify N as the Threshold in the rule configuration.

AND - All routing controls must be set. This is a shortcut for \"At least N,\" where N is the total number of controls in the rule.

OR - Any control must be set. This is a shortcut for \"At least N,\" where N is 1.

", + "enum": [ + "ATLEAST", + "AND", + "OR" + ] + }, + "ServiceQuotaExceededException": { + "type": "structure", + "documentation": "

402 response

", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 402 + } + }, + "Status": { + "type": "string", + "documentation": "

The deployment status of a resource. Status can be one of the following:

PENDING: Amazon Route 53 Application Recovery Controller is creating the resource.

DEPLOYED: The resource is deployed and ready to use.

PENDING_DELETION: Amazon Route 53 Application Recovery Controller is deleting the resource.

", + "enum": [ + "PENDING", + "DEPLOYED", + "PENDING_DELETION" + ] + }, + "ThrottlingException": { + "type": "structure", + "documentation": "

429 response - ThrottlingException.

", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "UpdateControlPanelRequest": { + "type": "structure", + "members": { + "ControlPanelArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the control panel.

" + }, + "ControlPanelName": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the control panel.

" + } + }, + "documentation": "

The details of the control panel that you're updating.

", + "required": [ + "ControlPanelArn", + "ControlPanelName" + ] + }, + "UpdateControlPanelResponse": { + "type": "structure", + "members": { + "ControlPanel": { + "shape": "ControlPanel", + "documentation": "

The control panel to update.

" + } + } + }, + "UpdateRoutingControlRequest": { + "type": "structure", + "members": { + "RoutingControlArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the routing control.

" + }, + "RoutingControlName": { + "shape": "__stringMin1Max64PatternS", + "documentation": "

The name of the routing control.

" + } + }, + "documentation": "

The details of the routing control that you're updating.

", + "required": [ + "RoutingControlName", + "RoutingControlArn" + ] + }, + "UpdateRoutingControlResponse": { + "type": "structure", + "members": { + "RoutingControl": { + "shape": "RoutingControl", + "documentation": "

The routing control that was updated.

" + } + } + }, + "UpdateSafetyRuleRequest": { + "type": "structure", + "members": { + "AssertionRuleUpdate": { + "shape": "AssertionRuleUpdate" + }, + "GatingRuleUpdate": { + "shape": "GatingRuleUpdate" + } + } + }, + "UpdateSafetyRuleResponse": { + "type": "structure", + "members": { + "AssertionRule": { + "shape": "AssertionRule" + }, + "GatingRule": { + "shape": "GatingRule" + } + } + }, + "ValidationException": { + "type": "structure", + "documentation": "

400 response - Multiple causes. For example, you might have a malformed query string and input parameter might be out of range, or you used parameters together incorrectly.

", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__integer": { + "type": "integer" + }, + "__listOfCluster": { + "type": "list", + "member": { + "shape": "Cluster" + } + }, + "__listOfClusterEndpoint": { + "type": "list", + "member": { + "shape": "ClusterEndpoint" + } + }, + "__listOfControlPanel": { + "type": "list", + "member": { + "shape": "ControlPanel" + } + }, + "__listOfRoutingControl": { + "type": "list", + "member": { + "shape": "RoutingControl" + } + }, + "__listOfRule": { + "type": "list", + "member": { + "shape": "Rule" + } + }, + "__listOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "__long": { + "type": "long" + }, + "__string": { + "type": "string" + }, + "__stringMax64": { + "type": "string", + "max": 64 + }, + "__stringMax8096": { + "type": "string", + "max": 8096 + }, + "__stringMin1Max128": { + "type": "string", + "min": 1, + "max": 128 + }, + "__stringMin1Max32": { + "type": "string", + "min": 1, + "max": 32 + }, + "__stringMin1Max64PatternS": { + "type": "string", + "min": 1, + "max": 64, + "pattern": "^\\S+$" + }, + "__timestampIso8601": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" + } + }, + "documentation": "

Recovery Control Configuration API Reference for Amazon Route 53 Application Recovery Controller

" +} diff --git a/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/waiters-2.json b/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..1794757e6d14 --- /dev/null +++ b/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,152 @@ +{ + "version": 2, + "waiters": { + "ClusterCreated": { + "description": "Wait until a cluster is created", + "operation": "DescribeCluster", + "delay": 5, + "maxAttempts": 26, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Cluster.Status", + "expected": "DEPLOYED" + }, + { + "state": "retry", + "matcher": "path", + "argument": "Cluster.Status", + "expected": "PENDING" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + }, + "ClusterDeleted": { + "description": "Wait for a cluster to be deleted", + "operation": "DescribeCluster", + "delay": 5, + "maxAttempts": 26, + "acceptors": [ + { + "state": "success", + "matcher": "status", + "expected": 404 + }, + { + "state": "retry", + "matcher": "path", + "argument": "Cluster.Status", + "expected": "PENDING_DELETION" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + }, + "ControlPanelCreated": { + "description": "Wait until a control panel is created", + "operation": "DescribeControlPanel", + "delay": 5, + "maxAttempts": 26, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "ControlPanel.Status", + "expected": "DEPLOYED" + }, + { + "state": "retry", + "matcher": "path", + "argument": "ControlPanel.Status", + "expected": "PENDING" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + }, + "ControlPanelDeleted": { + "description": "Wait until a control panel is deleted", + "operation": "DescribeControlPanel", + "delay": 5, + "maxAttempts": 26, + "acceptors": [ + { + "state": "success", + "matcher": "status", 
+ "expected": 404 + }, + { + "state": "retry", + "matcher": "path", + "argument": "ControlPanel.Status", + "expected": "PENDING_DELETION" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + }, + "RoutingControlCreated": { + "description": "Wait until a routing control is created", + "operation": "DescribeRoutingControl", + "delay": 5, + "maxAttempts": 26, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "RoutingControl.Status", + "expected": "DEPLOYED" + }, + { + "state": "retry", + "matcher": "path", + "argument": "RoutingControl.Status", + "expected": "PENDING" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + }, + "RoutingControlDeleted": { + "description": "Wait for a routing control to be deleted", + "operation": "DescribeRoutingControl", + "delay": 5, + "maxAttempts": 26, + "acceptors": [ + { + "state": "success", + "matcher": "status", + "expected": 404 + }, + { + "state": "retry", + "matcher": "path", + "argument": "RoutingControl.Status", + "expected": "PENDING_DELETION" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + } + } +} diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml new file mode 100644 index 000000000000..0f99c9e07473 --- /dev/null +++ b/services/route53recoveryreadiness/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.17.16-SNAPSHOT + + route53recoveryreadiness + AWS Java SDK :: Services :: Route53 Recovery Readiness + The AWS Java SDK for Route53 Recovery Readiness module holds the client classes that are used for + communicating with Route53 Recovery Readiness. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.route53recoveryreadiness + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/route53recoveryreadiness/src/main/resources/codegen-resources/paginators-1.json b/services/route53recoveryreadiness/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..4bcdba2fe99f --- /dev/null +++ b/services/route53recoveryreadiness/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,76 @@ +{ + "pagination" : { + "ListReadinessChecks" : { + "input_token" : "NextToken", + "output_token" : "NextToken", + "limit_key" : "MaxResults", + "result_key" : "ReadinessChecks" + }, + "ListResourceSets" : { + "input_token" : "NextToken", + "output_token" : "NextToken", + "limit_key" : "MaxResults", + "result_key" : "ResourceSets" + }, + "ListCells" : { + "input_token" : "NextToken", + "output_token" : "NextToken", + "limit_key" : "MaxResults", + "result_key" : "Cells" + }, + "ListRecoveryGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RecoveryGroups" + }, + "ListRules": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Rules" + }, + "ListCrossAccountAuthorizations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "CrossAccountAuthorizations" + }, + "GetCellReadinessSummary": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ReadinessChecks", + "non_aggregate_keys": [ + "Readiness" + ] + }, + "GetRecoveryGroupReadinessSummary": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": 
"ReadinessChecks", + "non_aggregate_keys": [ + "Readiness" + ] + }, + "GetReadinessCheckStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Resources", + "non_aggregate_keys": [ + "Readiness", "Messages" + ] + }, + "GetReadinessCheckResourceStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Rules", + "non_aggregate_keys": [ + "Readiness" + ] + } + } +} diff --git a/services/route53recoveryreadiness/src/main/resources/codegen-resources/service-2.json b/services/route53recoveryreadiness/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..9c251eb1762d --- /dev/null +++ b/services/route53recoveryreadiness/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2770 @@ +{ + "metadata": { + "apiVersion": "2019-12-02", + "endpointPrefix": "route53-recovery-readiness", + "signingName": "route53-recovery-readiness", + "serviceFullName": "AWS Route53 Recovery Readiness", + "serviceId": "Route53 Recovery Readiness", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "route53-recovery-readiness-2019-12-02", + "signatureVersion": "v4" + }, + "operations": { + "CreateCell": { + "name": "CreateCell", + "http": { + "method": "POST", + "requestUri": "/cells", + "responseCode": 200 + }, + "input": { + "shape": "CreateCellRequest" + }, + "output": { + "shape": "CreateCellResponse", + "documentation": "Result of a CreateCell call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Creates a new Cell." 
+ }, + "CreateCrossAccountAuthorization": { + "name": "CreateCrossAccountAuthorization", + "http": { + "method": "POST", + "requestUri": "/crossaccountauthorizations", + "responseCode": 200 + }, + "input": { + "shape": "CreateCrossAccountAuthorizationRequest" + }, + "output": { + "shape": "CreateCrossAccountAuthorizationResponse", + "documentation": "Result of a CreateCrossAccountAuthorization call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Create a new cross account readiness authorization." + }, + "CreateReadinessCheck": { + "name": "CreateReadinessCheck", + "http": { + "method": "POST", + "requestUri": "/readinesschecks", + "responseCode": 200 + }, + "input": { + "shape": "CreateReadinessCheckRequest" + }, + "output": { + "shape": "CreateReadinessCheckResponse", + "documentation": "Result of a CreateReadinessCheck call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Creates a new Readiness Check." + }, + "CreateRecoveryGroup": { + "name": "CreateRecoveryGroup", + "http": { + "method": "POST", + "requestUri": "/recoverygroups", + "responseCode": 200 + }, + "input": { + "shape": "CreateRecoveryGroupRequest" + }, + "output": { + "shape": "CreateRecoveryGroupResponse", + "documentation": "Result of a CreateRecoveryGroup call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Creates a new Recovery Group." 
+ }, + "CreateResourceSet": { + "name": "CreateResourceSet", + "http": { + "method": "POST", + "requestUri": "/resourcesets", + "responseCode": 200 + }, + "input": { + "shape": "CreateResourceSetRequest" + }, + "output": { + "shape": "CreateResourceSetResponse", + "documentation": "Result of a CreateResourceSet call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Creates a new Resource Set." + }, + "DeleteCell": { + "name": "DeleteCell", + "http": { + "method": "DELETE", + "requestUri": "/cells/{cellName}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteCellRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Deletes an existing Cell." 
+ }, + "DeleteCrossAccountAuthorization": { + "name": "DeleteCrossAccountAuthorization", + "http": { + "method": "DELETE", + "requestUri": "/crossaccountauthorizations/{crossAccountAuthorization}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteCrossAccountAuthorizationRequest" + }, + "output": { + "shape": "DeleteCrossAccountAuthorizationResponse", + "documentation": "Result of a DeleteCrossAccountAuthorization call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Delete cross account readiness authorization" + }, + "DeleteReadinessCheck": { + "name": "DeleteReadinessCheck", + "http": { + "method": "DELETE", + "requestUri": "/readinesschecks/{readinessCheckName}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteReadinessCheckRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Deletes an existing Readiness Check." + }, + "DeleteRecoveryGroup": { + "name": "DeleteRecoveryGroup", + "http": { + "method": "DELETE", + "requestUri": "/recoverygroups/{recoveryGroupName}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteRecoveryGroupRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Deletes an existing Recovery Group." 
+ }, + "DeleteResourceSet": { + "name": "DeleteResourceSet", + "http": { + "method": "DELETE", + "requestUri": "/resourcesets/{resourceSetName}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteResourceSetRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Deletes an existing Resource Set." + }, + "GetArchitectureRecommendations": { + "name": "GetArchitectureRecommendations", + "http": { + "method": "GET", + "requestUri": "/recoverygroups/{recoveryGroupName}/architectureRecommendations", + "responseCode": 200 + }, + "input": { + "shape": "GetArchitectureRecommendationsRequest" + }, + "output": { + "shape": "GetArchitectureRecommendationsResponse", + "documentation": "Result of a GetArchitectureRecommendations call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns a collection of recommendations to improve resilience and readiness check quality for a Recovery Group." + }, + "GetCell": { + "name": "GetCell", + "http": { + "method": "GET", + "requestUri": "/cells/{cellName}", + "responseCode": 200 + }, + "input": { + "shape": "GetCellRequest" + }, + "output": { + "shape": "GetCellResponse", + "documentation": "Result of a GetCell call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns information about a Cell." 
+ }, + "GetCellReadinessSummary": { + "name": "GetCellReadinessSummary", + "http": { + "method": "GET", + "requestUri": "/cellreadiness/{cellName}", + "responseCode": 200 + }, + "input": { + "shape": "GetCellReadinessSummaryRequest" + }, + "output": { + "shape": "GetCellReadinessSummaryResponse", + "documentation": "Result of a GetCellReadinessSummary call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns information about readiness of a Cell." + }, + "GetReadinessCheck": { + "name": "GetReadinessCheck", + "http": { + "method": "GET", + "requestUri": "/readinesschecks/{readinessCheckName}", + "responseCode": 200 + }, + "input": { + "shape": "GetReadinessCheckRequest" + }, + "output": { + "shape": "GetReadinessCheckResponse", + "documentation": "Result of a GetReadinessCheck call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns information about a ReadinessCheck." 
+ }, + "GetReadinessCheckResourceStatus": { + "name": "GetReadinessCheckResourceStatus", + "http": { + "method": "GET", + "requestUri": "/readinesschecks/{readinessCheckName}/resource/{resourceIdentifier}/status", + "responseCode": 200 + }, + "input": { + "shape": "GetReadinessCheckResourceStatusRequest" + }, + "output": { + "shape": "GetReadinessCheckResourceStatusResponse", + "documentation": "Result of a GetReadinessCheckResourceStatus call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns detailed information about the status of an individual resource within a Readiness Check's Resource Set." + }, + "GetReadinessCheckStatus": { + "name": "GetReadinessCheckStatus", + "http": { + "method": "GET", + "requestUri": "/readinesschecks/{readinessCheckName}/status", + "responseCode": 200 + }, + "input": { + "shape": "GetReadinessCheckStatusRequest" + }, + "output": { + "shape": "GetReadinessCheckStatusResponse", + "documentation": "Result of a GetReadinessCheckStatus call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns information about the status of a Readiness Check." 
+ }, + "GetRecoveryGroup": { + "name": "GetRecoveryGroup", + "http": { + "method": "GET", + "requestUri": "/recoverygroups/{recoveryGroupName}", + "responseCode": 200 + }, + "input": { + "shape": "GetRecoveryGroupRequest" + }, + "output": { + "shape": "GetRecoveryGroupResponse", + "documentation": "Result of a GetRecoveryGroup call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns information about a Recovery Group." + }, + "GetRecoveryGroupReadinessSummary": { + "name": "GetRecoveryGroupReadinessSummary", + "http": { + "method": "GET", + "requestUri": "/recoverygroupreadiness/{recoveryGroupName}", + "responseCode": 200 + }, + "input": { + "shape": "GetRecoveryGroupReadinessSummaryRequest" + }, + "output": { + "shape": "GetRecoveryGroupReadinessSummaryResponse", + "documentation": "Result of a GetRecoveryGroupReadinessSummary call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns information about a Recovery Group." 
+ }, + "GetResourceSet": { + "name": "GetResourceSet", + "http": { + "method": "GET", + "requestUri": "/resourcesets/{resourceSetName}", + "responseCode": 200 + }, + "input": { + "shape": "GetResourceSetRequest" + }, + "output": { + "shape": "GetResourceSetResponse", + "documentation": "Result of a GetResourceSets call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns information about a Resource Set." + }, + "ListCells": { + "name": "ListCells", + "http": { + "method": "GET", + "requestUri": "/cells", + "responseCode": 200 + }, + "input": { + "shape": "ListCellsRequest" + }, + "output": { + "shape": "ListCellsResponse", + "documentation": "Result of a ListCells call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns a collection of Cells." + }, + "ListCrossAccountAuthorizations": { + "name": "ListCrossAccountAuthorizations", + "http": { + "method": "GET", + "requestUri": "/crossaccountauthorizations", + "responseCode": 200 + }, + "input": { + "shape": "ListCrossAccountAuthorizationsRequest" + }, + "output": { + "shape": "ListCrossAccountAuthorizationsResponse", + "documentation": "Result of a ListCrossAccountAuthorizations call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns a collection of cross account readiness authorizations." 
+ }, + "ListReadinessChecks": { + "name": "ListReadinessChecks", + "http": { + "method": "GET", + "requestUri": "/readinesschecks", + "responseCode": 200 + }, + "input": { + "shape": "ListReadinessChecksRequest" + }, + "output": { + "shape": "ListReadinessChecksResponse", + "documentation": "Result of a ListReadinessChecks call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns a collection of Readiness Checks." + }, + "ListRecoveryGroups": { + "name": "ListRecoveryGroups", + "http": { + "method": "GET", + "requestUri": "/recoverygroups", + "responseCode": 200 + }, + "input": { + "shape": "ListRecoveryGroupsRequest" + }, + "output": { + "shape": "ListRecoveryGroupsResponse", + "documentation": "Result of a ListRecoveryGroups call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns a collection of Recovery Groups." + }, + "ListResourceSets": { + "name": "ListResourceSets", + "http": { + "method": "GET", + "requestUri": "/resourcesets", + "responseCode": 200 + }, + "input": { + "shape": "ListResourceSetsRequest" + }, + "output": { + "shape": "ListResourceSetsResponse", + "documentation": "Result of a ListResourceSets call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns a collection of Resource Sets." 
+ }, + "ListRules": { + "name": "ListRules", + "http": { + "method": "GET", + "requestUri": "/rules", + "responseCode": 200 + }, + "input": { + "shape": "ListRulesRequest" + }, + "output": { + "shape": "ListRulesResponse", + "documentation": "Result of a ListRules call" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Returns a collection of rules that are applied as part of Readiness Checks." + }, + "ListTagsForResources": { + "name": "ListTagsForResources", + "http": { + "method": "GET", + "requestUri": "/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourcesRequest" + }, + "output": { + "shape": "ListTagsForResourcesResponse", + "documentation": "200 response" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "requested resource was not found" + }, + { + "shape": "ValidationException", + "documentation": "an invalid request" + }, + { + "shape": "InternalServerException", + "documentation": "Internal service error" + } + ], + "documentation": "Returns a list of the tags assigned to the specified resource." + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "POST", + "requestUri": "/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "TagResourceRequest" + }, + "output": { + "shape": "TagResourceResponse", + "documentation": "200 response" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "requested resource was not found" + }, + { + "shape": "ValidationException", + "documentation": "an invalid request" + }, + { + "shape": "InternalServerException", + "documentation": "Internal service error" + } + ], + "documentation": "Adds tags to the specified resource. You can specify one or more tags to add." 
+ }, + "UntagResource": { + "name": "UntagResource", + "http": { + "method": "DELETE", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "requested resource was not found" + }, + { + "shape": "ValidationException", + "documentation": "an invalid request" + }, + { + "shape": "InternalServerException", + "documentation": "Internal service error" + } + ], + "documentation": "Removes tags from the specified resource. You can specify one or more tags to remove." + }, + "UpdateCell": { + "name": "UpdateCell", + "http": { + "method": "PUT", + "requestUri": "/cells/{cellName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateCellRequest" + }, + "output": { + "shape": "UpdateCellResponse", + "documentation": "Result of a UpdateCell call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Updates an existing Cell." + }, + "UpdateReadinessCheck": { + "name": "UpdateReadinessCheck", + "http": { + "method": "PUT", + "requestUri": "/readinesschecks/{readinessCheckName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateReadinessCheckRequest" + }, + "output": { + "shape": "UpdateReadinessCheckResponse", + "documentation": "Result of a UpdateReadinessChecks call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Updates an existing Readiness Check." 
+ }, + "UpdateRecoveryGroup": { + "name": "UpdateRecoveryGroup", + "http": { + "method": "PUT", + "requestUri": "/recoverygroups/{recoveryGroupName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateRecoveryGroupRequest" + }, + "output": { + "shape": "UpdateRecoveryGroupResponse", + "documentation": "Result of a UpdateRecoveryGroups call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Updates an existing Recovery Group." + }, + "UpdateResourceSet": { + "name": "UpdateResourceSet", + "http": { + "method": "PUT", + "requestUri": "/resourcesets/{resourceSetName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateResourceSetRequest" + }, + "output": { + "shape": "UpdateResourceSetResponse", + "documentation": "Result of a UpdateResourceSets call" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ], + "documentation": "Updates an existing Resource Set." 
+ } + }, + "shapes": { + "AccessDeniedException": { + "type": "structure", + "exception": true, + "error": { + "httpStatusCode": 403 + }, + "documentation": "User does not have sufficient access to perform this action.", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + } + }, + "CellOutput": { + "type": "structure", + "members": { + "CellArn": { + "shape": "__stringMax256", + "locationName": "cellArn", + "documentation": "The arn for the Cell" + }, + "CellName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "cellName", + "documentation": "The name of the Cell" + }, + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "ParentReadinessScopes": { + "shape": "__listOf__string", + "locationName": "parentReadinessScopes", + "documentation": "A list of Cell ARNs and/or RecoveryGroup ARNs" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "A Cell and its properties", + "required": [ + "ParentReadinessScopes", + "CellArn", + "CellName", + "Cells" + ] + }, + "ConflictException": { + "type": "structure", + "exception": true, + "error": { + "httpStatusCode": 409 + }, + "documentation": "Updating or deleting a resource can cause an inconsistent state.", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + } + }, + "CreateCellRequest": { + "type": "structure", + "members": { + "CellName": { + "shape": "__string", + "locationName": "cellName", + "documentation": "The name of the Cell to create" + }, + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns contained within this Cell (for use in nested Cells, e.g. 
regions within which AZs)" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "The Cell to create", + "required": [ + "CellName" + ] + }, + "CreateCellResponse": { + "type": "structure", + "members": { + "CellArn": { + "shape": "__stringMax256", + "locationName": "cellArn", + "documentation": "The arn for the Cell" + }, + "CellName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "cellName", + "documentation": "The name of the Cell" + }, + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "ParentReadinessScopes": { + "shape": "__listOf__string", + "locationName": "parentReadinessScopes", + "documentation": "A list of Cell ARNs and/or RecoveryGroup ARNs" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "CreateCrossAccountAuthorizationRequest": { + "type": "structure", + "members": { + "CrossAccountAuthorization": { + "shape": "CrossAccountAuthorization", + "locationName": "crossAccountAuthorization", + "documentation": "The cross account authorization" + } + }, + "documentation": "The cross account authorization", + "required": [ + "CrossAccountAuthorization" + ] + }, + "CreateCrossAccountAuthorizationResponse": { + "type": "structure", + "members": { + "CrossAccountAuthorization": { + "shape": "CrossAccountAuthorization", + "locationName": "crossAccountAuthorization", + "documentation": "The cross account authorization" + } + } + }, + "CreateReadinessCheckRequest": { + "type": "structure", + "members": { + "ReadinessCheckName": { + "shape": "__string", + "locationName": "readinessCheckName", + "documentation": "The name of the ReadinessCheck to create" + }, + "ResourceSetName": { + "shape": "__string", + "locationName": "resourceSetName", + "documentation": "The name of the ResourceSet to check" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "The ReadinessCheck to create", + 
"required": [ + "ResourceSetName", + "ReadinessCheckName" + ] + }, + "CreateReadinessCheckResponse": { + "type": "structure", + "members": { + "ReadinessCheckArn": { + "shape": "__stringMax256", + "locationName": "readinessCheckArn", + "documentation": "Arn associated with ReadinessCheck" + }, + "ReadinessCheckName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "readinessCheckName", + "documentation": "Name for a ReadinessCheck" + }, + "ResourceSet": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "resourceSet", + "documentation": "Name of the ResourceSet to be checked" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "CreateRecoveryGroupRequest": { + "type": "structure", + "members": { + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "RecoveryGroupName": { + "shape": "__string", + "locationName": "recoveryGroupName", + "documentation": "The name of the RecoveryGroup to create" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "The RecoveryGroup to create", + "required": [ + "RecoveryGroupName" + ] + }, + "CreateRecoveryGroupResponse": { + "type": "structure", + "members": { + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "RecoveryGroupArn": { + "shape": "__stringMax256", + "locationName": "recoveryGroupArn", + "documentation": "The arn for the RecoveryGroup" + }, + "RecoveryGroupName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "recoveryGroupName", + "documentation": "The name of the RecoveryGroup" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "CreateResourceSetRequest": { + "type": "structure", + "members": { + "ResourceSetName": { + "shape": "__string", + "locationName": "resourceSetName", + "documentation": "The name of the ResourceSet to create" + }, + "ResourceSetType": { + 
"shape": "__stringPatternAWSAZaZ09AZaZ09", + "locationName": "resourceSetType", + "documentation": "AWS Resource type of the resources in the ResourceSet" + }, + "Resources": { + "shape": "__listOfResource", + "locationName": "resources", + "documentation": "A list of Resource objects" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "The ResourceSet to create", + "required": [ + "ResourceSetType", + "ResourceSetName", + "Resources" + ] + }, + "CreateResourceSetResponse": { + "type": "structure", + "members": { + "ResourceSetArn": { + "shape": "__stringMax256", + "locationName": "resourceSetArn", + "documentation": "The arn for the ResourceSet" + }, + "ResourceSetName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "resourceSetName", + "documentation": "The name of the ResourceSet" + }, + "ResourceSetType": { + "shape": "__stringPatternAWSAZaZ09AZaZ09", + "locationName": "resourceSetType", + "documentation": "AWS Resource Type of the resources in the ResourceSet" + }, + "Resources": { + "shape": "__listOfResource", + "locationName": "resources", + "documentation": "A list of Resource objects" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "CrossAccountAuthorization": { + "type": "string", + "documentation": "A cross-account authorization, e.g. arn:aws:iam::123456789012:root" + }, + "DNSTargetResource": { + "type": "structure", + "members": { + "DomainName": { + "shape": "__string", + "locationName": "domainName", + "documentation": "The DNS Name that acts as ingress point to a portion of application" + }, + "HostedZoneArn": { + "shape": "__string", + "locationName": "hostedZoneArn", + "documentation": "The Hosted Zone ARN that contains the DNS record with the provided name of target resource." 
+ }, + "RecordSetId": { + "shape": "__string", + "locationName": "recordSetId", + "documentation": "The R53 Set Id to uniquely identify a record given a Name and a Type" + }, + "RecordType": { + "shape": "__string", + "locationName": "recordType", + "documentation": "The Type of DNS Record of target resource" + }, + "TargetResource": { + "shape": "TargetResource", + "locationName": "targetResource" + } + }, + "documentation": "A component for DNS/Routing Control Readiness Checks" + }, + "DeleteCellRequest": { + "type": "structure", + "members": { + "CellName": { + "shape": "__string", + "location": "uri", + "locationName": "cellName", + "documentation": "The Cell to delete" + } + }, + "required": [ + "CellName" + ] + }, + "DeleteCrossAccountAuthorizationRequest": { + "type": "structure", + "members": { + "CrossAccountAuthorization": { + "shape": "__string", + "location": "uri", + "locationName": "crossAccountAuthorization", + "documentation": "The cross account authorization" + } + }, + "required": [ + "CrossAccountAuthorization" + ] + }, + "DeleteCrossAccountAuthorizationResponse": { + "type": "structure", + "members": {} + }, + "DeleteReadinessCheckRequest": { + "type": "structure", + "members": { + "ReadinessCheckName": { + "shape": "__string", + "location": "uri", + "locationName": "readinessCheckName", + "documentation": "The ReadinessCheck to delete" + } + }, + "required": [ + "ReadinessCheckName" + ] + }, + "DeleteRecoveryGroupRequest": { + "type": "structure", + "members": { + "RecoveryGroupName": { + "shape": "__string", + "location": "uri", + "locationName": "recoveryGroupName", + "documentation": "The RecoveryGroup to delete" + } + }, + "required": [ + "RecoveryGroupName" + ] + }, + "DeleteResourceSetRequest": { + "type": "structure", + "members": { + "ResourceSetName": { + "shape": "__string", + "location": "uri", + "locationName": "resourceSetName", + "documentation": "The ResourceSet to delete" + } + }, + "required": [ + "ResourceSetName" + ] + }, + 
"GetArchitectureRecommendationsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "RecoveryGroupName": { + "shape": "__string", + "location": "uri", + "locationName": "recoveryGroupName", + "documentation": "Name of RecoveryGroup (top level resource) to be analyzed." + } + }, + "required": [ + "RecoveryGroupName" + ] + }, + "GetArchitectureRecommendationsResponse": { + "type": "structure", + "members": { + "LastAuditTimestamp": { + "shape": "LastAuditTimestamp", + "locationName": "lastAuditTimestamp", + "documentation": "The time a Recovery Group was last assessed for recommendations in UTC ISO-8601 format." + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection" + }, + "Recommendations": { + "shape": "__listOfRecommendation", + "locationName": "recommendations", + "documentation": "A list of recommendations for the customer's application" + } + } + }, + "GetCellReadinessSummaryRequest": { + "type": "structure", + "members": { + "CellName": { + "shape": "__string", + "location": "uri", + "locationName": "cellName", + "documentation": "The name of the Cell" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." 
+ } + }, + "required": [ + "CellName" + ] + }, + "GetCellReadinessSummaryResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "Readiness": { + "shape": "Readiness", + "locationName": "readiness", + "documentation": "The readiness at Cell level." + }, + "ReadinessChecks": { + "shape": "__listOfReadinessCheckSummary", + "locationName": "readinessChecks", + "documentation": "Summaries for the ReadinessChecks making up the Cell" + } + } + }, + "GetCellRequest": { + "type": "structure", + "members": { + "CellName": { + "shape": "__string", + "location": "uri", + "locationName": "cellName", + "documentation": "The Cell to get" + } + }, + "required": [ + "CellName" + ] + }, + "GetCellResponse": { + "type": "structure", + "members": { + "CellArn": { + "shape": "__stringMax256", + "locationName": "cellArn", + "documentation": "The arn for the Cell" + }, + "CellName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "cellName", + "documentation": "The name of the Cell" + }, + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "ParentReadinessScopes": { + "shape": "__listOf__string", + "locationName": "parentReadinessScopes", + "documentation": "A list of Cell ARNs and/or RecoveryGroup ARNs" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "GetReadinessCheckRequest": { + "type": "structure", + "members": { + "ReadinessCheckName": { + "shape": "__string", + "location": "uri", + "locationName": "readinessCheckName", + "documentation": "The ReadinessCheck to get" + } + }, + "required": [ + "ReadinessCheckName" + ] + }, + "GetReadinessCheckResourceStatusRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": 
"maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." + }, + "ReadinessCheckName": { + "shape": "__string", + "location": "uri", + "locationName": "readinessCheckName", + "documentation": "The ReadinessCheck to get" + }, + "ResourceIdentifier": { + "shape": "__string", + "location": "uri", + "locationName": "resourceIdentifier", + "documentation": "The resource ARN or component Id to get" + } + }, + "required": [ + "ReadinessCheckName", + "ResourceIdentifier" + ] + }, + "GetReadinessCheckResourceStatusResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "Readiness": { + "shape": "Readiness", + "locationName": "readiness", + "documentation": "The readiness at rule level." 
+ }, + "Rules": { + "shape": "__listOfRuleResult", + "locationName": "rules", + "documentation": "Details of the rule's results" + } + } + }, + "GetReadinessCheckResponse": { + "type": "structure", + "members": { + "ReadinessCheckArn": { + "shape": "__stringMax256", + "locationName": "readinessCheckArn", + "documentation": "Arn associated with ReadinessCheck" + }, + "ReadinessCheckName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "readinessCheckName", + "documentation": "Name for a ReadinessCheck" + }, + "ResourceSet": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "resourceSet", + "documentation": "Name of the ResourceSet to be checked" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "GetReadinessCheckStatusRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." + }, + "ReadinessCheckName": { + "shape": "__string", + "location": "uri", + "locationName": "readinessCheckName", + "documentation": "The ReadinessCheck to get" + } + }, + "required": [ + "ReadinessCheckName" + ] + }, + "GetReadinessCheckStatusResponse": { + "type": "structure", + "members": { + "Messages": { + "shape": "__listOfMessage", + "locationName": "messages", + "documentation": "Top level messages for readiness check status" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "Readiness": { + "shape": "Readiness", + "locationName": "readiness", + "documentation": "The readiness at rule level." 
+ }, + "Resources": { + "shape": "__listOfResourceResult", + "locationName": "resources", + "documentation": "Summary of resources' readiness" + } + } + }, + "GetRecoveryGroupReadinessSummaryRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." + }, + "RecoveryGroupName": { + "shape": "__string", + "location": "uri", + "locationName": "recoveryGroupName", + "documentation": "The name of the RecoveryGroup" + } + }, + "required": [ + "RecoveryGroupName" + ] + }, + "GetRecoveryGroupReadinessSummaryResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "Readiness": { + "shape": "Readiness", + "locationName": "readiness", + "documentation": "The readiness at RecoveryGroup level." 
+ }, + "ReadinessChecks": { + "shape": "__listOfReadinessCheckSummary", + "locationName": "readinessChecks", + "documentation": "Summaries for the ReadinessChecks making up the RecoveryGroup" + } + } + }, + "GetRecoveryGroupRequest": { + "type": "structure", + "members": { + "RecoveryGroupName": { + "shape": "__string", + "location": "uri", + "locationName": "recoveryGroupName", + "documentation": "The RecoveryGroup to get" + } + }, + "required": [ + "RecoveryGroupName" + ] + }, + "GetRecoveryGroupResponse": { + "type": "structure", + "members": { + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "RecoveryGroupArn": { + "shape": "__stringMax256", + "locationName": "recoveryGroupArn", + "documentation": "The arn for the RecoveryGroup" + }, + "RecoveryGroupName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "recoveryGroupName", + "documentation": "The name of the RecoveryGroup" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "GetResourceSetRequest": { + "type": "structure", + "members": { + "ResourceSetName": { + "shape": "__string", + "location": "uri", + "locationName": "resourceSetName", + "documentation": "The ResourceSet to get" + } + }, + "required": [ + "ResourceSetName" + ] + }, + "GetResourceSetResponse": { + "type": "structure", + "members": { + "ResourceSetArn": { + "shape": "__stringMax256", + "locationName": "resourceSetArn", + "documentation": "The arn for the ResourceSet" + }, + "ResourceSetName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "resourceSetName", + "documentation": "The name of the ResourceSet" + }, + "ResourceSetType": { + "shape": "__stringPatternAWSAZaZ09AZaZ09", + "locationName": "resourceSetType", + "documentation": "AWS Resource Type of the resources in the ResourceSet" + }, + "Resources": { + "shape": "__listOfResource", + "locationName": "resources", + "documentation": "A list of Resource objects" + 
}, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "InternalServerException": { + "type": "structure", + "exception": true, + "error": { + "httpStatusCode": 500 + }, + "documentation": "An unexpected error occurred.", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + } + }, + "LastAuditTimestamp": { + "type": "timestamp", + "documentation": "The time a Recovery Group was last assessed for recommendations in UTC ISO-8601 format", + "timestampFormat": "iso8601" + }, + "ListCellsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." + } + } + }, + "ListCellsResponse": { + "type": "structure", + "members": { + "Cells": { + "shape": "__listOfCellOutput", + "locationName": "cells", + "documentation": "A list of Cells" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + } + } + }, + "ListCrossAccountAuthorizationsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." 
+ } + } + }, + "ListCrossAccountAuthorizationsResponse": { + "type": "structure", + "members": { + "CrossAccountAuthorizations": { + "shape": "__listOfCrossAccountAuthorization", + "locationName": "crossAccountAuthorizations", + "documentation": "A list of CrossAccountAuthorizations" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + } + } + }, + "ListReadinessChecksRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." + } + } + }, + "ListReadinessChecksResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "ReadinessChecks": { + "shape": "__listOfReadinessCheckOutput", + "locationName": "readinessChecks", + "documentation": "A list of ReadinessCheck associated with the account" + } + } + }, + "ListRecoveryGroupsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." 
+ } + } + }, + "ListRecoveryGroupsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "RecoveryGroups": { + "shape": "__listOfRecoveryGroupOutput", + "locationName": "recoveryGroups", + "documentation": "A list of RecoveryGroups" + } + } + }, + "ListResourceSetsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." + } + } + }, + "ListResourceSetsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "ResourceSets": { + "shape": "__listOfResourceSetOutput", + "locationName": "resourceSets", + "documentation": "A list of ResourceSets associated with the account" + } + } + }, + "ListRulesOutput": { + "type": "structure", + "members": { + "ResourceType": { + "shape": "__stringMax64", + "locationName": "resourceType", + "documentation": "The resource type the rule applies to." + }, + "RuleDescription": { + "shape": "__stringMax256", + "locationName": "ruleDescription", + "documentation": "A description of the rule" + }, + "RuleId": { + "shape": "__stringMax64", + "locationName": "ruleId", + "documentation": "The Rule's ID." 
+ } + }, + "documentation": "A collection of rules used in a readiness check", + "required": [ + "RuleDescription", + "RuleId", + "ResourceType" + ] + }, + "ListRulesRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "Upper bound on number of records to return." + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request." + }, + "ResourceType": { + "shape": "__string", + "location": "querystring", + "locationName": "resourceType", + "documentation": "Filter parameter which specifies the rules to return given a resource type." + } + } + }, + "ListRulesResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection." + }, + "Rules": { + "shape": "__listOfListRulesOutput", + "locationName": "rules", + "documentation": "A list of rules" + } + } + }, + "ListTagsForResourcesRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "The Amazon Resource Name (ARN) for the resource. You can get this from the response to any request to the resource." 
+ } + }, + "required": [ + "ResourceArn" + ] + }, + "ListTagsForResourcesResponse": { + "type": "structure", + "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 1000 + }, + "Message": { + "type": "structure", + "members": { + "MessageText": { + "shape": "__string", + "locationName": "messageText", + "documentation": "The text of a readiness check message" + } + }, + "documentation": "Information relating to readiness check status" + }, + "NLBResource": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "An NLB resource arn" + } + }, + "documentation": "The NLB resource a DNS Target Resource points to" + }, + "R53ResourceRecord": { + "type": "structure", + "members": { + "DomainName": { + "shape": "__string", + "locationName": "domainName", + "documentation": "The DNS target name" + }, + "RecordSetId": { + "shape": "__string", + "locationName": "recordSetId", + "documentation": "The Resource Record set id" + } + }, + "documentation": "The Route 53 resource a DNS Target Resource record points to" + }, + "Readiness": { + "type": "string", + "documentation": "The readiness of an entire ReadinessCheck or an individual resource ARN.", + "enum": [ + "READY", + "NOT_READY", + "UNKNOWN", + "NOT_AUTHORIZED" + ] + }, + "ReadinessCheckOutput": { + "type": "structure", + "members": { + "ReadinessCheckArn": { + "shape": "__stringMax256", + "locationName": "readinessCheckArn", + "documentation": "Arn associated with ReadinessCheck" + }, + "ReadinessCheckName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "readinessCheckName", + "documentation": "Name for a ReadinessCheck" + }, + "ResourceSet": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "resourceSet", + "documentation": "Name of the ResourceSet to be checked" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + 
"documentation": "A resource used for checking the readiness of a Resource Set", + "required": [ + "ReadinessCheckArn", + "ResourceSet" + ] + }, + "ReadinessCheckSummary": { + "type": "structure", + "members": { + "Readiness": { + "shape": "Readiness", + "locationName": "readiness", + "documentation": "The readiness of this ReadinessCheck" + }, + "ReadinessCheckName": { + "shape": "__string", + "locationName": "readinessCheckName", + "documentation": "The name of a ReadinessCheck which is part of the given RecoveryGroup or Cell" + } + }, + "documentation": "Summary of ReadinessCheck status, paginated in GetRecoveryGroupReadinessSummary and GetCellReadinessSummary" + }, + "ReadinessCheckTimestamp": { + "type": "timestamp", + "documentation": "The time the Cell was last checked for readiness, in ISO-8601 format, UTC.", + "timestampFormat": "iso8601" + }, + "Recommendation": { + "type": "structure", + "members": { + "RecommendationText": { + "shape": "__string", + "locationName": "recommendationText", + "documentation": "Guidance text for recommendation" + } + }, + "documentation": "Guidance for improving Recovery Group resilliancy", + "required": [ + "RecommendationText" + ] + }, + "RecoveryGroupOutput": { + "type": "structure", + "members": { + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "RecoveryGroupArn": { + "shape": "__stringMax256", + "locationName": "recoveryGroupArn", + "documentation": "The arn for the RecoveryGroup" + }, + "RecoveryGroupName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "recoveryGroupName", + "documentation": "The name of the RecoveryGroup" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "A Recovery Group generally containing multiple Cells", + "required": [ + "RecoveryGroupArn", + "RecoveryGroupName", + "Cells" + ] + }, + "Resource": { + "type": "structure", + "members": { + "ComponentId": { + "shape": 
"__string", + "locationName": "componentId", + "documentation": "The component id of the resource, generated by the service when dnsTargetResource is used" + }, + "DnsTargetResource": { + "shape": "DNSTargetResource", + "locationName": "dnsTargetResource" + }, + "ReadinessScopes": { + "shape": "__listOf__string", + "locationName": "readinessScopes", + "documentation": "A list of RecoveryGroup ARNs and/or Cell ARNs that this resource is contained within." + }, + "ResourceArn": { + "shape": "__string", + "locationName": "resourceArn", + "documentation": "The ARN of the AWS resource, can be skipped if dnsTargetResource is used" + } + }, + "documentation": "The resource element of a ResourceSet" + }, + "ResourceNotFoundException": { + "type": "structure", + "exception": true, + "error": { + "httpStatusCode": 404 + }, + "documentation": "The requested resource does not exist.", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + } + }, + "ResourceResult": { + "type": "structure", + "members": { + "ComponentId": { + "shape": "__string", + "locationName": "componentId", + "documentation": "The component id of the resource" + }, + "LastCheckedTimestamp": { + "shape": "ReadinessCheckTimestamp", + "locationName": "lastCheckedTimestamp", + "documentation": "The time the resource was last checked for readiness, in ISO-8601 format, UTC." + }, + "Readiness": { + "shape": "Readiness", + "locationName": "readiness", + "documentation": "The readiness of the resource." 
+ }, + "ResourceArn": { + "shape": "__string", + "locationName": "resourceArn", + "documentation": "The ARN of the resource" + } + }, + "documentation": "Result with status for an individual resource.", + "required": [ + "Readiness", + "LastCheckedTimestamp" + ] + }, + "ResourceSetOutput": { + "type": "structure", + "members": { + "ResourceSetArn": { + "shape": "__stringMax256", + "locationName": "resourceSetArn", + "documentation": "The arn for the ResourceSet" + }, + "ResourceSetName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "resourceSetName", + "documentation": "The name of the ResourceSet" + }, + "ResourceSetType": { + "shape": "__stringPatternAWSAZaZ09AZaZ09", + "locationName": "resourceSetType", + "documentation": "AWS Resource Type of the resources in the ResourceSet" + }, + "Resources": { + "shape": "__listOfResource", + "locationName": "resources", + "documentation": "A list of Resource objects" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "A collection of resources of the same type", + "required": [ + "ResourceSetType", + "ResourceSetName", + "ResourceSetArn", + "Resources" + ] + }, + "RuleResult": { + "type": "structure", + "members": { + "LastCheckedTimestamp": { + "shape": "ReadinessCheckTimestamp", + "locationName": "lastCheckedTimestamp", + "documentation": "The time the resource was last checked for readiness, in ISO-8601 format, UTC." + }, + "Messages": { + "shape": "__listOfMessage", + "locationName": "messages", + "documentation": "Details about the resource's readiness" + }, + "Readiness": { + "shape": "Readiness", + "locationName": "readiness", + "documentation": "The readiness at rule level." + }, + "RuleId": { + "shape": "__string", + "locationName": "ruleId", + "documentation": "The identifier of the rule." 
+ } + }, + "documentation": "Result with status for an individual rule..", + "required": [ + "Messages", + "Readiness", + "RuleId", + "LastCheckedTimestamp" + ] + }, + "TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "The Amazon Resource Name (ARN) for the resource. You can get this from the response to any request to the resource." + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "required": [ + "ResourceArn", + "Tags" + ] + }, + "TagResourceResponse": { + "type": "structure", + "members": {} + }, + "Tags": { + "type": "map", + "documentation": "A collection of tags associated with a resource", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + } + }, + "TargetResource": { + "type": "structure", + "members": { + "NLBResource": { + "shape": "NLBResource", + "locationName": "nLBResource" + }, + "R53Resource": { + "shape": "R53ResourceRecord", + "locationName": "r53Resource" + } + }, + "documentation": "The target resource the R53 record points to" + }, + "ThrottlingException": { + "type": "structure", + "exception": true, + "error": { + "httpStatusCode": 429 + }, + "documentation": "Request was denied due to request throttling.", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + } + }, + "UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "The Amazon Resource Name (ARN) for the resource. You can get this from the response to any request to the resource." + }, + "TagKeys": { + "shape": "__listOf__string", + "location": "querystring", + "locationName": "tagKeys", + "documentation": "A comma-separated list of the tag keys to remove from the resource." 
+ } + }, + "required": [ + "TagKeys", + "ResourceArn" + ] + }, + "UpdateCellRequest": { + "type": "structure", + "members": { + "CellName": { + "shape": "__string", + "location": "uri", + "locationName": "cellName", + "documentation": "The Cell to update" + }, + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns, completely replaces previous list" + } + }, + "documentation": "Parameters to update for the Cell", + "required": [ + "CellName", + "Cells" + ] + }, + "UpdateCellResponse": { + "type": "structure", + "members": { + "CellArn": { + "shape": "__stringMax256", + "locationName": "cellArn", + "documentation": "The arn for the Cell" + }, + "CellName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "cellName", + "documentation": "The name of the Cell" + }, + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "ParentReadinessScopes": { + "shape": "__listOf__string", + "locationName": "parentReadinessScopes", + "documentation": "A list of Cell ARNs and/or RecoveryGroup ARNs" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "UpdateReadinessCheckRequest": { + "type": "structure", + "members": { + "ReadinessCheckName": { + "shape": "__string", + "location": "uri", + "locationName": "readinessCheckName", + "documentation": "The ReadinessCheck to update" + }, + "ResourceSetName": { + "shape": "__string", + "locationName": "resourceSetName", + "documentation": "The name of the ResourceSet to check" + } + }, + "documentation": "The new Readiness Check values", + "required": [ + "ReadinessCheckName", + "ResourceSetName" + ] + }, + "UpdateReadinessCheckResponse": { + "type": "structure", + "members": { + "ReadinessCheckArn": { + "shape": "__stringMax256", + "locationName": "readinessCheckArn", + "documentation": "Arn associated with ReadinessCheck" + }, + "ReadinessCheckName": { + "shape": 
"__stringMax64PatternAAZAZ09Z", + "locationName": "readinessCheckName", + "documentation": "Name for a ReadinessCheck" + }, + "ResourceSet": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "resourceSet", + "documentation": "Name of the ResourceSet to be checked" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "UpdateRecoveryGroupRequest": { + "type": "structure", + "members": { + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns, completely replaces previous list" + }, + "RecoveryGroupName": { + "shape": "__string", + "location": "uri", + "locationName": "recoveryGroupName", + "documentation": "The RecoveryGroup to update" + } + }, + "documentation": "Parameters to update for the RecoveryGroup", + "required": [ + "RecoveryGroupName", + "Cells" + ] + }, + "UpdateRecoveryGroupResponse": { + "type": "structure", + "members": { + "Cells": { + "shape": "__listOf__string", + "locationName": "cells", + "documentation": "A list of Cell arns" + }, + "RecoveryGroupArn": { + "shape": "__stringMax256", + "locationName": "recoveryGroupArn", + "documentation": "The arn for the RecoveryGroup" + }, + "RecoveryGroupName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "recoveryGroupName", + "documentation": "The name of the RecoveryGroup" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "UpdateResourceSetRequest": { + "type": "structure", + "members": { + "ResourceSetName": { + "shape": "__string", + "location": "uri", + "locationName": "resourceSetName", + "documentation": "The ResourceSet to update" + }, + "ResourceSetType": { + "shape": "__stringPatternAWSAZaZ09AZaZ09", + "locationName": "resourceSetType", + "documentation": "AWS Resource Type of the resources in the ResourceSet" + }, + "Resources": { + "shape": "__listOfResource", + "locationName": "resources", + "documentation": "A list of Resource objects" + } + }, + "documentation": 
"configuration for the desired", + "required": [ + "ResourceSetName", + "ResourceSetType", + "Resources" + ] + }, + "UpdateResourceSetResponse": { + "type": "structure", + "members": { + "ResourceSetArn": { + "shape": "__stringMax256", + "locationName": "resourceSetArn", + "documentation": "The arn for the ResourceSet" + }, + "ResourceSetName": { + "shape": "__stringMax64PatternAAZAZ09Z", + "locationName": "resourceSetName", + "documentation": "The name of the ResourceSet" + }, + "ResourceSetType": { + "shape": "__stringPatternAWSAZaZ09AZaZ09", + "locationName": "resourceSetType", + "documentation": "AWS Resource Type of the resources in the ResourceSet" + }, + "Resources": { + "shape": "__listOfResource", + "locationName": "resources", + "documentation": "A list of Resource objects" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "ValidationException": { + "type": "structure", + "exception": true, + "error": { + "httpStatusCode": 400 + }, + "documentation": "The input fails to satisfy the constraints specified by an AWS service.", + "members": { + "Message": { + "shape": "__string", + "locationName": "message" + } + } + }, + "__listOfCellOutput": { + "type": "list", + "member": { + "shape": "CellOutput" + } + }, + "__listOfCrossAccountAuthorization": { + "type": "list", + "member": { + "shape": "CrossAccountAuthorization" + } + }, + "__listOfListRulesOutput": { + "type": "list", + "member": { + "shape": "ListRulesOutput" + } + }, + "__listOfMessage": { + "type": "list", + "member": { + "shape": "Message" + } + }, + "__listOfReadinessCheckOutput": { + "type": "list", + "member": { + "shape": "ReadinessCheckOutput" + } + }, + "__listOfReadinessCheckSummary": { + "type": "list", + "member": { + "shape": "ReadinessCheckSummary" + } + }, + "__listOfRecommendation": { + "type": "list", + "member": { + "shape": "Recommendation" + } + }, + "__listOfRecoveryGroupOutput": { + "type": "list", + "member": { + "shape": "RecoveryGroupOutput" + } + }, 
+ "__listOfResource": { + "type": "list", + "member": { + "shape": "Resource" + } + }, + "__listOfResourceResult": { + "type": "list", + "member": { + "shape": "ResourceResult" + } + }, + "__listOfResourceSetOutput": { + "type": "list", + "member": { + "shape": "ResourceSetOutput" + } + }, + "__listOfRuleResult": { + "type": "list", + "member": { + "shape": "RuleResult" + } + }, + "__listOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "__string": { + "type": "string" + }, + "__stringMax256": { + "type": "string", + "max": 256 + }, + "__stringMax64": { + "type": "string", + "max": 64 + }, + "__stringMax64PatternAAZAZ09Z": { + "type": "string", + "max": 64, + "pattern": "\\A[a-zA-Z0-9_]+\\z" + }, + "__stringPatternAWSAZaZ09AZaZ09": { + "type": "string", + "pattern": "AWS::[A-Za-z0-9]+::[A-Za-z0-9]+" + } + }, + "documentation": "AWS Route53 Recovery Readiness" +} diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index 7834ea96def3..3c4ae4174601 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/s3/pom.xml b/services/s3/pom.xml index 91b8383f74c2..06f03942083b 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/CopySourceIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/CopySourceIntegrationTest.java new file mode 100644 index 000000000000..1b47754040d4 --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/CopySourceIntegrationTest.java @@ -0,0 +1,150 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.internal.handlers.CopySourceInterceptor; +import software.amazon.awssdk.services.s3.model.BucketVersioningStatus; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; + +/** + * Integration tests for the {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters for + * {@link CopyObjectRequest}. 
Specifically, we ensure that users are able to seamlessly use the same input for both the + * {@link PutObjectRequest} key and the {@link CopyObjectRequest} source key (and not be required to manually URL encode the + * COPY source key). This also effectively tests for parity with the SDK v1 behavior. + * + * @see CopySourceInterceptor + */ +@RunWith(Parameterized.class) +public class CopySourceIntegrationTest extends S3IntegrationTestBase { + + private static final String SOURCE_UNVERSIONED_BUCKET_NAME = temporaryBucketName("copy-source-integ-test-src"); + private static final String SOURCE_VERSIONED_BUCKET_NAME = temporaryBucketName("copy-source-integ-test-versioned-src"); + private static final String DESTINATION_BUCKET_NAME = temporaryBucketName("copy-source-integ-test-dest"); + + @BeforeClass + public static void initializeTestData() throws Exception { + createBucket(SOURCE_UNVERSIONED_BUCKET_NAME); + createBucket(SOURCE_VERSIONED_BUCKET_NAME); + s3.putBucketVersioning(r -> r + .bucket(SOURCE_VERSIONED_BUCKET_NAME) + .versioningConfiguration(v -> v.status(BucketVersioningStatus.ENABLED))); + createBucket(DESTINATION_BUCKET_NAME); + } + + @AfterClass + public static void tearDown() { + deleteBucketAndAllContents(SOURCE_UNVERSIONED_BUCKET_NAME); + deleteBucketAndAllContents(SOURCE_VERSIONED_BUCKET_NAME); + deleteBucketAndAllContents(DESTINATION_BUCKET_NAME); + } + + @Parameters + public static Collection parameters() throws Exception { + return Arrays.asList( + "simpleKey", + "key/with/slashes", + "\uD83E\uDEA3", + "specialChars/ +!#$&'()*,:;=?@\"", + "%20" + ); + } + + private final String key; + + public CopySourceIntegrationTest(String key) { + this.key = key; + } + + @Test + public void copyObject_WithoutVersion_AcceptsSameKeyAsPut() throws Exception { + String originalContent = UUID.randomUUID().toString(); + + s3.putObject(PutObjectRequest.builder() + .bucket(SOURCE_UNVERSIONED_BUCKET_NAME) + .key(key) + .build(), RequestBody.fromString(originalContent, 
StandardCharsets.UTF_8)); + + s3.copyObject(CopyObjectRequest.builder() + .sourceBucket(SOURCE_UNVERSIONED_BUCKET_NAME) + .sourceKey(key) + .destinationBucket(DESTINATION_BUCKET_NAME) + .destinationKey(key) + .build()); + + String copiedContent = s3.getObjectAsBytes(GetObjectRequest.builder() + .bucket(DESTINATION_BUCKET_NAME) + .key(key) + .build()).asUtf8String(); + + assertThat(copiedContent, is(originalContent)); + } + + /** + * Test that we can correctly copy versioned source objects. + *

+ * Motivated by: https://github.com/aws/aws-sdk-js/issues/727 + */ + @Test + public void copyObject_WithVersion_AcceptsSameKeyAsPut() throws Exception { + Map versionToContentMap = new HashMap<>(); + int numVersionsToCreate = 3; + for (int i = 0; i < numVersionsToCreate; i++) { + String originalContent = UUID.randomUUID().toString(); + PutObjectResponse response = s3.putObject(PutObjectRequest.builder() + .bucket(SOURCE_VERSIONED_BUCKET_NAME) + .key(key) + .build(), + RequestBody.fromString(originalContent, StandardCharsets.UTF_8)); + versionToContentMap.put(response.versionId(), originalContent); + } + + versionToContentMap.forEach((versionId, originalContent) -> { + s3.copyObject(CopyObjectRequest.builder() + .sourceBucket(SOURCE_VERSIONED_BUCKET_NAME) + .sourceKey(key) + .sourceVersionId(versionId) + .destinationBucket(DESTINATION_BUCKET_NAME) + .destinationKey(key) + .build()); + + String copiedContent = s3.getObjectAsBytes(GetObjectRequest.builder() + .bucket(DESTINATION_BUCKET_NAME) + .key(key) + .build()).asUtf8String(); + assertThat(copiedContent, is(originalContent)); + }); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStream.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStream.java index ab089377e8cc..36fff298baf0 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStream.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStream.java @@ -17,11 +17,12 @@ import java.io.IOException; import java.io.InputStream; -import java.nio.ByteBuffer; +import java.util.Arrays; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.http.Abortable; +import 
software.amazon.awssdk.utils.BinaryUtils; @SdkInternalApi public class ChecksumValidatingInputStream extends InputStream implements Abortable { @@ -34,7 +35,7 @@ public class ChecksumValidatingInputStream extends InputStream implements Aborta private long lengthRead = 0; // Preserve the computed checksum because some InputStream readers (e.g., java.util.Properties) read more than once at the // end of the stream. - private Integer computedChecksum; + private byte[] computedChecksum; /** * Creates an input stream using the specified Checksum, input stream, and length. @@ -162,26 +163,15 @@ public void close() throws IOException { inputStream.close(); } - /** - * Gets the stream's checksum as an integer. - * - * @return checksum. - */ - public int getStreamChecksum() { - ByteBuffer bb = ByteBuffer.wrap(streamChecksum); - return bb.getInt(); - } - private void validateAndThrow() { - int streamChecksumInt = getStreamChecksum(); if (computedChecksum == null) { - computedChecksum = ByteBuffer.wrap(checkSum.getChecksumBytes()).getInt(); + computedChecksum = checkSum.getChecksumBytes(); } - if (streamChecksumInt != computedChecksum) { + if (!Arrays.equals(computedChecksum, streamChecksum)) { throw SdkClientException.builder().message( - String.format("Data read has a different checksum than expected. Was %d, but expected %d", - computedChecksum, streamChecksumInt)).build(); + String.format("Data read has a different checksum than expected. 
Was 0x%s, but expected 0x%s", + BinaryUtils.toHex(computedChecksum), BinaryUtils.toHex(streamChecksum))).build(); } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisher.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisher.java index 2c871470d84e..a3310331dd23 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisher.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisher.java @@ -132,12 +132,11 @@ public void onError(Throwable t) { @Override public void onComplete() { if (strippedLength > 0) { - int streamChecksumInt = ByteBuffer.wrap(streamChecksum).getInt(); - int computedChecksumInt = ByteBuffer.wrap(sdkChecksum.getChecksumBytes()).getInt(); - if (streamChecksumInt != computedChecksumInt) { + byte[] computedChecksum = sdkChecksum.getChecksumBytes(); + if (!Arrays.equals(computedChecksum, streamChecksum)) { onError(SdkClientException.create( - String.format("Data read has a different checksum than expected. Was %d, but expected %d", - computedChecksumInt, streamChecksumInt))); + String.format("Data read has a different checksum than expected. Was 0x%s, but expected 0x%s", + BinaryUtils.toHex(computedChecksum), BinaryUtils.toHex(streamChecksum)))); return; // Return after onError and not call onComplete below } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CopySourceInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CopySourceInterceptor.java new file mode 100644 index 000000000000..81816bfd01af --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CopySourceInterceptor.java @@ -0,0 +1,126 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.handlers; + +import static software.amazon.awssdk.utils.http.SdkHttpUtils.urlEncodeIgnoreSlashes; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.interceptor.Context.ModifyRequest; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.services.s3.internal.resource.S3ArnUtils; +import software.amazon.awssdk.services.s3.internal.resource.S3ResourceType; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; +import software.amazon.awssdk.utils.Validate; + +/** + * This interceptor transforms the {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters for + * {@link CopyObjectRequest} and {@link UploadPartCopyRequest} into a {@code copySource} parameter. The logic needed to + * construct a {@code copySource} can be considered non-trivial, so this interceptor facilitates allowing users to + * use higher-level constructs that more closely match other APIs, like {@link PutObjectRequest}. Additionally, this + * interceptor is responsible for URL encoding the relevant portions of the {@code copySource} value. + *

+ * API_CopyObject_RequestParameters + *

+ * API_UploadPartCopy_RequestParameters + */ +@SdkInternalApi +public final class CopySourceInterceptor implements ExecutionInterceptor { + + @Override + public SdkRequest modifyRequest(ModifyRequest context, ExecutionAttributes executionAttributes) { + SdkRequest request = context.request(); + if (request instanceof CopyObjectRequest) { + return modifyCopyObjectRequest((CopyObjectRequest) request); + } + if (request instanceof UploadPartCopyRequest) { + return modifyUploadPartCopyRequest((UploadPartCopyRequest) request); + } + return request; + } + + private static SdkRequest modifyCopyObjectRequest(CopyObjectRequest request) { + if (request.copySource() != null) { + requireNotSet(request.sourceBucket(), "sourceBucket"); + requireNotSet(request.sourceKey(), "sourceKey"); + requireNotSet(request.sourceVersionId(), "sourceVersionId"); + return request; + } + String copySource = constructCopySource( + requireSet(request.sourceBucket(), "sourceBucket"), + requireSet(request.sourceKey(), "sourceKey"), + request.sourceVersionId() + ); + return request.toBuilder() + .sourceBucket(null) + .sourceKey(null) + .sourceVersionId(null) + .copySource(copySource) + .build(); + } + + private static SdkRequest modifyUploadPartCopyRequest(UploadPartCopyRequest request) { + if (request.copySource() != null) { + requireNotSet(request.sourceBucket(), "sourceBucket"); + requireNotSet(request.sourceKey(), "sourceKey"); + requireNotSet(request.sourceVersionId(), "sourceVersionId"); + return request; + } + String copySource = constructCopySource( + requireSet(request.sourceBucket(), "sourceBucket"), + requireSet(request.sourceKey(), "sourceKey"), + request.sourceVersionId() + ); + return request.toBuilder() + .sourceBucket(null) + .sourceKey(null) + .sourceVersionId(null) + .copySource(copySource) + .build(); + } + + private static String constructCopySource(String sourceBucket, String sourceKey, String sourceVersionId) { + StringBuilder copySource = new StringBuilder(); + 
copySource.append("/"); + copySource.append(urlEncodeIgnoreSlashes(sourceBucket)); + S3ArnUtils.getArnType(sourceBucket).ifPresent(arnType -> { + if (arnType == S3ResourceType.ACCESS_POINT || arnType == S3ResourceType.OUTPOST) { + copySource.append("/object"); + } + }); + copySource.append("/"); + copySource.append(urlEncodeIgnoreSlashes(sourceKey)); + if (sourceVersionId != null) { + copySource.append("?versionId="); + copySource.append(urlEncodeIgnoreSlashes(sourceVersionId)); + } + return copySource.toString(); + } + + private static void requireNotSet(Object value, String paramName) { + Validate.isTrue(value == null, "Parameter 'copySource' must not be used in conjunction with '%s'", + paramName); + } + + private static T requireSet(T value, String paramName) { + Validate.isTrue(value != null, "Parameter '%s' must not be null", + paramName); + return value; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtils.java index ec5262419c5a..4aa5bbeb10e5 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtils.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtils.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.services.s3.internal.resource; +import java.util.Optional; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.arns.ArnResource; @@ -72,4 +73,15 @@ public static IntermediateOutpostResource parseOutpostArn(Arn arn) { .outpostSubresource(ArnResource.fromString(subresource)) .build(); } + + public static Optional getArnType(String arnString) { + try { + Arn arn = Arn.fromString(arnString); + String resourceType = arn.resource().resourceType().get(); + S3ResourceType s3ResourceType = S3ResourceType.fromValue(resourceType); + return 
Optional.of(s3ResourceType); + } catch (Exception ignored) { + return Optional.empty(); + } + } } diff --git a/services/s3/src/main/resources/META-INF/native-image/software.amazon.awssdk/s3/reflect-config.json b/services/s3/src/main/resources/META-INF/native-image/software.amazon.awssdk/s3/reflect-config.json index 27a7bbe640b9..a8cb0f7a641a 100644 --- a/services/s3/src/main/resources/META-INF/native-image/software.amazon.awssdk/s3/reflect-config.json +++ b/services/s3/src/main/resources/META-INF/native-image/software.amazon.awssdk/s3/reflect-config.json @@ -115,5 +115,14 @@ "parameterTypes": [] } ] + }, + { + "name": "software.amazon.awssdk.services.s3.internal.handlers.CopySourceInterceptor", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] } ] \ No newline at end of file diff --git a/services/s3/src/main/resources/codegen-resources/customization.config b/services/s3/src/main/resources/codegen-resources/customization.config index e284b3aa21cf..04413d672c4f 100644 --- a/services/s3/src/main/resources/codegen-resources/customization.config +++ b/services/s3/src/main/resources/codegen-resources/customization.config @@ -17,8 +17,62 @@ ] }, "CopyObjectRequest": { + "inject": [ + { + "SourceBucket": { + "shape": "BucketName", + "documentation": "The name of the bucket containing the object to copy. The provided input will be URL encoded. The {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters must not be used in conjunction with the {@code copySource} parameter." + }, + "SourceKey": { + "shape": "ObjectKey", + "documentation": "The key of the object to copy. The provided input will be URL encoded. The {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters must not be used in conjunction with the {@code copySource} parameter." + }, + "SourceVersionId": { + "shape": "ObjectVersionId", + "documentation": "Specifies a particular version of the source object to copy. By default the latest version is copied. 
The {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters must not be used in conjunction with the {@code copySource} parameter." + } + } + ], "modify": [ { + "CopySource": { + "deprecated": true, + "deprecatedMessage": "The {@code copySource} parameter has been deprecated in favor of the more user-friendly {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters. The {@code copySource} parameter will remain fully functional, but it must not be used in conjunction with its replacement parameters." + }, + "Bucket": { + "emitPropertyName": "DestinationBucket", + "existingNameDeprecated": true + }, + "Key": { + "emitPropertyName": "DestinationKey", + "existingNameDeprecated": true + } + } + ] + }, + "UploadPartCopyRequest": { + "inject": [ + { + "SourceBucket": { + "shape": "BucketName", + "documentation": "The name of the bucket containing the object to copy. The provided input will be URL encoded. The {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters must not be used in conjunction with the {@code copySource} parameter." + }, + "SourceKey": { + "shape": "ObjectKey", + "documentation": "The key of the object to copy. The provided input will be URL encoded. The {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters must not be used in conjunction with the {@code copySource} parameter." + }, + "SourceVersionId": { + "shape": "ObjectVersionId", + "documentation": "Specifies a particular version of the source object to copy. By default the latest version is copied. The {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters must not be used in conjunction with the {@code copySource} parameter." 
+ } + } + ], + "modify": [ + { + "CopySource": { + "deprecated": true, + "deprecatedMessage": "The {@code copySource} parameter has been deprecated in favor of the more user-friendly {@code sourceBucket}, {@code sourceKey}, and {@code sourceVersionId} parameters. The {@code copySource} parameter will remain fully functional, but it must not be used in conjunction with its replacement parameters." + }, "Bucket": { "emitPropertyName": "DestinationBucket", "existingNameDeprecated": true diff --git a/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors b/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors index d9d447cb955d..a0cad21e1180 100644 --- a/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors +++ b/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors @@ -10,4 +10,5 @@ software.amazon.awssdk.services.s3.internal.handlers.AsyncChecksumValidationInte software.amazon.awssdk.services.s3.internal.handlers.SyncChecksumValidationInterceptor software.amazon.awssdk.services.s3.internal.handlers.EnableTrailingChecksumInterceptor software.amazon.awssdk.services.s3.internal.handlers.ExceptionTranslationInterceptor -software.amazon.awssdk.services.s3.internal.handlers.GetObjectInterceptor \ No newline at end of file +software.amazon.awssdk.services.s3.internal.handlers.GetObjectInterceptor +software.amazon.awssdk.services.s3.internal.handlers.CopySourceInterceptor \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStreamTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStreamTest.java new file mode 100644 index 000000000000..a83bf45b4155 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStreamTest.java @@ -0,0 +1,87 @@ +/* + * 
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.checksums; + +import static org.junit.Assert.assertArrayEquals; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.checksums.Md5Checksum; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.utils.IoUtils; + +public class ChecksumValidatingInputStreamTest { + private static final int TEST_DATA_SIZE = 32; + private static final int CHECKSUM_SIZE = 16; + + private static byte[] testData; + private static byte[] testDataWithoutChecksum; + + @BeforeClass + public static void populateData() { + testData = new byte[TEST_DATA_SIZE + CHECKSUM_SIZE]; + for (int i = 0; i < TEST_DATA_SIZE; i++) { + testData[i] = (byte)(i & 0x7f); + } + + Md5Checksum checksum = new Md5Checksum(); + checksum.update(testData, 0, TEST_DATA_SIZE); + byte[] checksumBytes = checksum.getChecksumBytes(); + + for (int i = 0; i < CHECKSUM_SIZE; i++) { + testData[TEST_DATA_SIZE + i] = checksumBytes[i]; + } + + testDataWithoutChecksum = Arrays.copyOfRange(testData, 0, TEST_DATA_SIZE); + } + + @Test + public void validChecksumSucceeds() throws IOException { + InputStream validatingInputStream = newValidatingStream(testData); + byte[] dataFromValidatingStream = 
IoUtils.toByteArray(validatingInputStream); + + assertArrayEquals(testDataWithoutChecksum, dataFromValidatingStream); + } + + @Test + public void invalidChecksumFails() throws IOException { + for (int i = 0; i < testData.length; i++) { + // Make sure that corruption of any byte in the test data causes a checksum validation failure. + byte[] corruptedChecksumData = Arrays.copyOf(testData, testData.length); + corruptedChecksumData[i] = (byte) ~corruptedChecksumData[i]; + + InputStream validatingInputStream = newValidatingStream(corruptedChecksumData); + + try { + IoUtils.toByteArray(validatingInputStream); + Assert.fail("Corruption at byte " + i + " was not detected."); + } catch (SdkClientException e) { + // Expected + } + } + } + + private InputStream newValidatingStream(byte[] dataFromS3) { + return new ChecksumValidatingInputStream(new ByteArrayInputStream(dataFromS3), + new Md5Checksum(), + TEST_DATA_SIZE + CHECKSUM_SIZE); + } +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisherTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisherTest.java index 935b656d8539..23027a2317fc 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisherTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisherTest.java @@ -16,11 +16,13 @@ package software.amazon.awssdk.services.s3.checksums; import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; @@ -31,6 +33,7 @@ import 
org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.core.checksums.Md5Checksum; +import software.amazon.awssdk.utils.BinaryUtils; /** * Unit test for ChecksumValidatingPublisher @@ -39,6 +42,7 @@ public class ChecksumValidatingPublisherTest { private static int TEST_DATA_SIZE = 32; // size of the test data, in bytes private static final int CHECKSUM_SIZE = 16; private static byte[] testData; + private static byte[] testDataWithoutChecksum; @BeforeClass public static void populateData() { @@ -52,27 +56,47 @@ public static void populateData() { for (int i = 0; i < CHECKSUM_SIZE; i++) { testData[TEST_DATA_SIZE + i] = checksumBytes[i]; } + + testDataWithoutChecksum = Arrays.copyOfRange(testData, 0, TEST_DATA_SIZE); } @Test public void testSinglePacket() { final TestPublisher driver = new TestPublisher(); - final TestSubscriber s = new TestSubscriber(Arrays.copyOfRange(testData, 0, TEST_DATA_SIZE)); + final TestSubscriber s = new TestSubscriber(); final ChecksumValidatingPublisher p = new ChecksumValidatingPublisher(driver, new Md5Checksum(), TEST_DATA_SIZE + CHECKSUM_SIZE); p.subscribe(s); driver.doOnNext(ByteBuffer.wrap(testData)); driver.doOnComplete(); + assertArrayEquals(testDataWithoutChecksum, s.receivedData()); assertTrue(s.hasCompleted()); assertFalse(s.isOnErrorCalled()); } + @Test + public void testLastChecksumByteCorrupted() { + TestPublisher driver = new TestPublisher(); + + TestSubscriber s = new TestSubscriber(); + ChecksumValidatingPublisher p = new ChecksumValidatingPublisher(driver, new Md5Checksum(), TEST_DATA_SIZE + CHECKSUM_SIZE); + p.subscribe(s); + + byte[] incorrectChecksumData = Arrays.copyOfRange(testData, 0, TEST_DATA_SIZE); + incorrectChecksumData[TEST_DATA_SIZE - 1] = (byte) ~incorrectChecksumData[TEST_DATA_SIZE - 1]; + driver.doOnNext(ByteBuffer.wrap(incorrectChecksumData)); + driver.doOnComplete(); + + assertFalse(s.hasCompleted()); + assertTrue(s.isOnErrorCalled()); + } + @Test public 
void testTwoPackets() { for (int i = 1; i < TEST_DATA_SIZE + CHECKSUM_SIZE - 1; i++) { final TestPublisher driver = new TestPublisher(); - final TestSubscriber s = new TestSubscriber(Arrays.copyOfRange(testData, 0, TEST_DATA_SIZE)); + final TestSubscriber s = new TestSubscriber(); final ChecksumValidatingPublisher p = new ChecksumValidatingPublisher(driver, new Md5Checksum(), TEST_DATA_SIZE + CHECKSUM_SIZE); p.subscribe(s); @@ -80,6 +104,7 @@ public void testTwoPackets() { driver.doOnNext(ByteBuffer.wrap(testData, i, TEST_DATA_SIZE + CHECKSUM_SIZE - i)); driver.doOnComplete(); + assertArrayEquals(testDataWithoutChecksum, s.receivedData()); assertTrue(s.hasCompleted()); assertFalse(s.isOnErrorCalled()); } @@ -89,7 +114,7 @@ public void testTwoPackets() { public void testTinyPackets() { for (int packetSize = 1; packetSize < CHECKSUM_SIZE; packetSize++) { final TestPublisher driver = new TestPublisher(); - final TestSubscriber s = new TestSubscriber(Arrays.copyOfRange(testData, 0, TEST_DATA_SIZE)); + final TestSubscriber s = new TestSubscriber(); final ChecksumValidatingPublisher p = new ChecksumValidatingPublisher(driver, new Md5Checksum(), TEST_DATA_SIZE + CHECKSUM_SIZE); p.subscribe(s); int currOffset = 0; @@ -100,6 +125,7 @@ public void testTinyPackets() { } driver.doOnComplete(); + assertArrayEquals(testDataWithoutChecksum, s.receivedData()); assertTrue(s.hasCompleted()); assertFalse(s.isOnErrorCalled()); } @@ -109,7 +135,7 @@ public void testTinyPackets() { public void testUnknownLength() { // When the length is unknown, the last 16 bytes are treated as a checksum, but are later ignored when completing final TestPublisher driver = new TestPublisher(); - final TestSubscriber s = new TestSubscriber(Arrays.copyOfRange(testData, 0, TEST_DATA_SIZE)); + final TestSubscriber s = new TestSubscriber(); final ChecksumValidatingPublisher p = new ChecksumValidatingPublisher(driver, new Md5Checksum(), 0); p.subscribe(s); @@ -122,6 +148,7 @@ public void testUnknownLength() { 
driver.doOnNext(ByteBuffer.wrap(randomChecksumData)); driver.doOnComplete(); + assertArrayEquals(testDataWithoutChecksum, s.receivedData()); assertTrue(s.hasCompleted()); assertFalse(s.isOnErrorCalled()); } @@ -130,7 +157,7 @@ public void testUnknownLength() { public void checksumValidationFailure_throwsSdkClientException_NotNPE() { final byte[] incorrectData = new byte[0]; final TestPublisher driver = new TestPublisher(); - final TestSubscriber s = new TestSubscriber(Arrays.copyOfRange(incorrectData, 0, TEST_DATA_SIZE)); + final TestSubscriber s = new TestSubscriber(); final ChecksumValidatingPublisher p = new ChecksumValidatingPublisher(driver, new Md5Checksum(), TEST_DATA_SIZE + CHECKSUM_SIZE); p.subscribe(s); @@ -142,13 +169,11 @@ public void checksumValidationFailure_throwsSdkClientException_NotNPE() { } private class TestSubscriber implements Subscriber { - final byte[] expected; final List received; boolean completed; boolean onErrorCalled; - TestSubscriber(byte[] expected) { - this.expected = expected; + TestSubscriber() { this.received = new ArrayList<>(); this.completed = false; } @@ -172,17 +197,21 @@ public void onError(Throwable t) { @Override public void onComplete() { - int matchPos = 0; - for (ByteBuffer buffer : received) { - byte[] bufferData = new byte[buffer.limit() - buffer.position()]; - buffer.get(bufferData); - assertArrayEquals(Arrays.copyOfRange(expected, matchPos, matchPos + bufferData.length), bufferData); - matchPos += bufferData.length; - } - assertEquals(expected.length, matchPos); completed = true; } + public byte[] receivedData() { + try { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + for (ByteBuffer buffer : received) { + os.write(BinaryUtils.copyBytesFrom(buffer)); + } + return os.toByteArray(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + public boolean hasCompleted() { return completed; } diff --git 
a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CopySourceInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CopySourceInterceptorTest.java new file mode 100644 index 000000000000..5bbe9b025adf --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CopySourceInterceptorTest.java @@ -0,0 +1,173 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.handlers; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy; + +import java.util.Arrays; +import java.util.Collection; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; + +@RunWith(Parameterized.class) +public class CopySourceInterceptorTest { + private final CopySourceInterceptor interceptor = new CopySourceInterceptor(); + + @Parameters + public static Collection parameters() throws Exception { + return Arrays.asList(new String[][] { + {"bucket", "simpleKey", null, + "/bucket/simpleKey"}, + + {"bucket", "key/with/slashes", null, + "/bucket/key/with/slashes"}, + + {"bucket", "\uD83E\uDEA3", null, + "/bucket/%F0%9F%AA%A3"}, + + {"bucket", "specialChars._ +!#$&'()*,:;=?@\"", null, + "/bucket/specialChars._%20%2B%21%23%24%26%27%28%29%2A%2C%3A%3B%3D%3F%40%22"}, + + {"bucket", "%20", null, + "/bucket/%2520"}, + + {"bucket", "key/with/version", "ZJlqdTGGfnWjRWjm.WtQc5XRTNJn3sz_", + "/bucket/key/with/version?versionId=ZJlqdTGGfnWjRWjm.WtQc5XRTNJn3sz_"} + }); + } + + private final String sourceBucket; + private final String sourceKey; + private final String sourceVersionId; + private final String expectedCopySource; + + public CopySourceInterceptorTest(String sourceBucket, String sourceKey, String sourceVersionId, String expectedCopySource) { + this.sourceBucket = sourceBucket; + this.sourceKey = sourceKey; + this.sourceVersionId = sourceVersionId; + this.expectedCopySource = expectedCopySource; + } + + @Test + public void modifyRequest_ConstructsUrlEncodedCopySource_whenCopyObjectRequest() { + CopyObjectRequest 
originalRequest = CopyObjectRequest.builder() + .sourceBucket(sourceBucket) + .sourceKey(sourceKey) + .sourceVersionId(sourceVersionId) + .build(); + CopyObjectRequest modifiedRequest = (CopyObjectRequest) interceptor + .modifyRequest(() -> originalRequest, new ExecutionAttributes()); + + assertThat(modifiedRequest.copySource()).isEqualTo(expectedCopySource); + } + + @Test + public void modifyRequest_ConstructsUrlEncodedCopySource_whenUploadPartCopyRequest() { + UploadPartCopyRequest originalRequest = UploadPartCopyRequest.builder() + .sourceBucket(sourceBucket) + .sourceKey(sourceKey) + .sourceVersionId(sourceVersionId) + .build(); + UploadPartCopyRequest modifiedRequest = (UploadPartCopyRequest) interceptor + .modifyRequest(() -> originalRequest, new ExecutionAttributes()); + + assertThat(modifiedRequest.copySource()).isEqualTo(expectedCopySource); + } + + @Test + public void modifyRequest_Throws_whenCopySourceUsedWithSourceBucket_withCopyObjectRequest() { + CopyObjectRequest originalRequest = CopyObjectRequest.builder() + .sourceBucket(sourceBucket) + .sourceKey(sourceKey) + .sourceVersionId(sourceVersionId) + .copySource("copySource") + .build(); + + assertThatThrownBy(() -> { + interceptor.modifyRequest(() -> originalRequest, new ExecutionAttributes()); + }).isInstanceOf(IllegalArgumentException.class) + .hasMessage("Parameter 'copySource' must not be used in conjunction with 'sourceBucket'"); + } + + @Test + public void modifyRequest_Throws_whenCopySourceUsedWithSourceBucket_withUploadPartCopyRequest() { + UploadPartCopyRequest originalRequest = UploadPartCopyRequest.builder() + .sourceBucket(sourceBucket) + .sourceKey(sourceKey) + .sourceVersionId(sourceVersionId) + .copySource("copySource") + .build(); + + assertThatThrownBy(() -> { + interceptor.modifyRequest(() -> originalRequest, new ExecutionAttributes()); + }).isInstanceOf(IllegalArgumentException.class) + .hasMessage("Parameter 'copySource' must not be used in conjunction with 'sourceBucket'"); + } + 
+ @Test + public void modifyRequest_Throws_whenSourceBucketNotSpecified_withCopyObjectRequest() { + CopyObjectRequest originalRequest = CopyObjectRequest.builder().build(); + + assertThatThrownBy(() -> { + interceptor.modifyRequest(() -> originalRequest, new ExecutionAttributes()); + }).isInstanceOf(IllegalArgumentException.class) + .hasMessage("Parameter 'sourceBucket' must not be null"); + } + + @Test + public void modifyRequest_Throws_whenSourceBucketNotSpecified_withUploadPartCopyRequest() { + UploadPartCopyRequest originalRequest = UploadPartCopyRequest.builder().build(); + + assertThatThrownBy(() -> { + interceptor.modifyRequest(() -> originalRequest, new ExecutionAttributes()); + }).isInstanceOf(IllegalArgumentException.class) + .hasMessage("Parameter 'sourceBucket' must not be null"); + } + + @Test + public void modifyRequest_insertsSlashObject_whenAccessPointArn() { + String accessPointArn = "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"; + CopyObjectRequest originalRequest = CopyObjectRequest.builder() + .sourceBucket(accessPointArn) + .sourceKey("my-key") + .build(); + CopyObjectRequest modifiedRequest = (CopyObjectRequest) interceptor + .modifyRequest(() -> originalRequest, new ExecutionAttributes()); + + assertThat(modifiedRequest.copySource()).isEqualTo( + "/arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint/my-access-point/object/my-key"); + } + + @Test + public void modifyRequest_insertsSlashObject_whenOutpostArn() { + String outpostBucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/my-bucket"; + CopyObjectRequest originalRequest = CopyObjectRequest.builder() + .sourceBucket(outpostBucketArn) + .sourceKey("my-key") + .build(); + CopyObjectRequest modifiedRequest = (CopyObjectRequest) interceptor + .modifyRequest(() -> originalRequest, new ExecutionAttributes()); + + assertThat(modifiedRequest.copySource()).isEqualTo( + 
"/arn%3Aaws%3As3-outposts%3Aus-west-2%3A123456789012%3Aoutpost/my-outpost/bucket/my-bucket/object/my-key"); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtilsTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtilsTest.java index 1bd47ba02294..b5159aa8bd35 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtilsTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtilsTest.java @@ -20,6 +20,7 @@ import static org.hamcrest.Matchers.is; import java.util.Optional; +import java.util.UUID; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -131,4 +132,22 @@ public void parseOutpostArn_malformedArnEmptyOutpostId_shouldThrowException() { .resource("outpost::accesspoint:name") .build()); } + + @Test + public void getArnType_shouldRecognizeAccessPointArn() { + String arnString = "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"; + assertThat(S3ArnUtils.getArnType(arnString), is(Optional.of(S3ResourceType.ACCESS_POINT))); + } + + @Test + public void getArnType_shouldRecognizeOutpostArn() { + String arnString = "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/my-bucket"; + assertThat(S3ArnUtils.getArnType(arnString), is(Optional.of(S3ResourceType.OUTPOST))); + } + + @Test + public void getArnType_shouldNotThrow_onRandomInput() { + String arnString = UUID.randomUUID().toString(); + assertThat(S3ArnUtils.getArnType(arnString), is(Optional.empty())); + } } diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index 27a82f473e79..b122bbbeaefc 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git 
a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AccessPointsIntegrationTest.java b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AccessPointsIntegrationTest.java index 11c2a1df1421..ef5feb77f8f2 100644 --- a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AccessPointsIntegrationTest.java +++ b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AccessPointsIntegrationTest.java @@ -76,8 +76,8 @@ public static void setupFixture() { @AfterClass public static void tearDown() { - deleteBucketAndAllContents(BUCKET); s3control.deleteAccessPoint(b -> b.accountId(accountId).name(AP_NAME)); + deleteBucketAndAllContents(BUCKET); } @Test diff --git a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java index 48f537bece32..3d4cafaa0031 100644 --- a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java +++ b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java @@ -65,8 +65,8 @@ public static void setupFixture() { @AfterClass public static void tearDown() { - deleteBucketAndAllContents(BUCKET); s3control.deleteAccessPoint(b -> b.accountId(accountId).name(AP_NAME)).join(); + deleteBucketAndAllContents(BUCKET); } @Test diff --git a/services/s3control/src/main/resources/codegen-resources/service-2.json b/services/s3control/src/main/resources/codegen-resources/service-2.json index 8e5fbc11a3b6..55ebce4c99d0 100644 --- a/services/s3control/src/main/resources/codegen-resources/service-2.json +++ b/services/s3control/src/main/resources/codegen-resources/service-2.json @@ -162,7 +162,7 @@ "requestUri":"/v20180820/bucket/{name}/policy" }, "input":{"shape":"DeleteBucketPolicyRequest"}, - 
"documentation":"

This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon S3 API Reference.

This implementation of the DELETE action uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this action. For more information, see Using Amazon S3 on Outposts in Amazon S3 User Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon S3 API Reference.

This implementation of the DELETE action uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this action. For more information, see Using Amazon S3 on Outposts in Amazon S3 User Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -205,7 +205,7 @@ "requestUri":"/v20180820/configuration/publicAccessBlock" }, "input":{"shape":"DeletePublicAccessBlockRequest"}, - "documentation":"

Removes the PublicAccessBlock configuration for an AWS account. For more information, see Using Amazon S3 block public access.

Related actions include:

", + "documentation":"

Removes the PublicAccessBlock configuration for an account. For more information, see Using Amazon S3 block public access.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -353,7 +353,7 @@ }, "input":{"shape":"GetBucketRequest"}, "output":{"shape":"GetBucketResult"}, - "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified Outposts bucket and belong to the Outposts bucket owner's account in order to use this action. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

", + "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the account that owns the Outposts bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified Outposts bucket and belong to the Outposts bucket owner's account in order to use this action. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -379,7 +379,7 @@ }, "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyResult"}, - "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon S3 API Reference.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this action.

Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketPolicy:

", + "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon S3 API Reference.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this action.

Only users from the Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -426,7 +426,7 @@ "errors":[ {"shape":"NoSuchPublicAccessBlockConfiguration"} ], - "documentation":"

Retrieves the PublicAccessBlock configuration for an AWS account. For more information, see Using Amazon S3 block public access.

Related actions include:

", + "documentation":"

Retrieves the PublicAccessBlock configuration for an account. For more information, see Using Amazon S3 block public access.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -496,7 +496,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Related actions include:

", + "documentation":"

Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the account making the request. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -599,7 +599,7 @@ "locationName":"PutBucketPolicyRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon S3 API Reference.

Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this action.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketPolicy:

", + "documentation":"

This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon S3 API Reference.

Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this action.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -612,7 +612,7 @@ "requestUri":"/v20180820/bucket/{name}/tagging" }, "input":{"shape":"PutBucketTaggingRequest"}, - "documentation":"

This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon S3 API Reference.

Sets the tags for an S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost allocation and tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using cost allocation in Amazon S3 bucket tags.

To use this action, you must have permissions to perform the s3-outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing access permissions to your Amazon S3 resources.

PutBucketTagging has the following special errors:

  • Error code: InvalidTagError

  • Error code: MalformedXMLError

    • Description: The XML provided does not match the schema.

  • Error code: OperationAbortedError

    • Description: A conflicting conditional action is currently in progress against this resource. Try again.

  • Error code: InternalError

    • Description: The service was unable to apply the provided tag to the bucket.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketTagging:

", + "documentation":"

This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon S3 API Reference.

Sets the tags for an S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost allocation and tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using cost allocation in Amazon S3 bucket tags.

To use this action, you must have permissions to perform the s3-outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing access permissions to your Amazon S3 resources.

PutBucketTagging has the following special errors:

  • Error code: InvalidTagError

  • Error code: MalformedXMLError

    • Description: The XML provided does not match the schema.

  • Error code: OperationAbortedError

    • Description: A conflicting conditional action is currently in progress against this resource. Try again.

  • Error code: InternalError

    • Description: The service was unable to apply the provided tag to the bucket.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -636,7 +636,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon S3 User Guide.

  • If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.

  • For deleting existing tags for your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.

  • A few things to consider about using tags:

    • Amazon S3 limits the maximum number of tags to 50 tags per job.

    • You can associate up to 50 tags with a job as long as they have unique tag keys.

    • A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.

    • The key and values are case sensitive.

    • For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.

To use this action, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

", + "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon S3 User Guide.

  • If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.

  • For deleting existing tags for your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.

  • A few things to consider about using tags:

    • Amazon S3 limits the maximum number of tags to 50 tags per job.

    • You can associate up to 50 tags with a job as long as they have unique tag keys.

    • A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.

    • The key and values are case sensitive.

    • For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the Billing and Cost Management User Guide.

To use this action, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -648,7 +648,7 @@ "requestUri":"/v20180820/configuration/publicAccessBlock" }, "input":{"shape":"PutPublicAccessBlockRequest"}, - "documentation":"

Creates or modifies the PublicAccessBlock configuration for an AWS account. For more information, see Using Amazon S3 block public access.

Related actions include:

", + "documentation":"

Creates or modifies the PublicAccessBlock configuration for an account. For more information, see Using Amazon S3 block public access.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -755,7 +755,7 @@ }, "VpcConfiguration":{ "shape":"VpcConfiguration", - "documentation":"

The virtual private cloud (VPC) configuration for this access point, if one exists.

This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other AWS services.

" + "documentation":"

The virtual private cloud (VPC) configuration for this access point, if one exists.

This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other Amazon Web Services services.

" }, "Bucket":{ "shape":"BucketName", @@ -764,6 +764,10 @@ "AccessPointArn":{ "shape":"S3AccessPointArn", "documentation":"

The ARN for the access point.

" + }, + "Alias":{ + "shape":"Alias", + "documentation":"

The name or alias of the access point.

" } }, "documentation":"

An access point used to access a bucket.

" @@ -777,7 +781,7 @@ }, "AccessPointName":{ "type":"string", - "max":50, + "max":63, "min":3 }, "AccountId":{ @@ -810,20 +814,25 @@ }, "documentation":"

A container for the activity metrics.

" }, + "Alias":{ + "type":"string", + "max":63, + "pattern":"^[0-9a-z\\\\-]{63}" + }, "AwsLambdaTransformation":{ "type":"structure", "required":["FunctionArn"], "members":{ "FunctionArn":{ "shape":"FunctionArnString", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Lambda function.

" + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function.

" }, "FunctionPayload":{ "shape":"AwsLambdaTransformationPayload", "documentation":"

Additional JSON that provides supplemental data to the Lambda function used to transform objects.

" } }, - "documentation":"

AWS Lambda function used to transform objects through an Object Lambda Access Point.

" + "documentation":"

Lambda function used to transform objects through an Object Lambda Access Point.

" }, "AwsLambdaTransformationPayload":{"type":"string"}, "AwsOrgArn":{ @@ -845,7 +854,7 @@ "type":"structure", "members":{ }, - "documentation":"

The requested Outposts bucket name is not available. The bucket namespace is shared by all users of the AWS Outposts in this Region. Select a different name and try again.

", + "documentation":"

The requested Outposts bucket name is not available. The bucket namespace is shared by all users of the Outposts in this Region. Select a different name and try again.

", "exception":true }, "BucketAlreadyOwnedByYou":{ @@ -925,7 +934,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID for owner of the specified Object Lambda Access Point.

", + "documentation":"

The account ID for the owner of the specified Object Lambda Access Point.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -961,7 +970,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID for the owner of the bucket for which you want to create an access point.

", + "documentation":"

The account ID for the owner of the bucket for which you want to create an access point.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -974,7 +983,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket that you want to associate this access point with.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

" + "documentation":"

The name of the bucket that you want to associate this access point with.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

" }, "VpcConfiguration":{ "shape":"VpcConfiguration", @@ -992,6 +1001,10 @@ "AccessPointArn":{ "shape":"S3AccessPointArn", "documentation":"

The ARN of the access point.

This is only supported by Amazon S3 on Outposts.

" + }, + "Alias":{ + "shape":"Alias", + "documentation":"

The name or alias of the access point.

" } } }, @@ -1083,7 +1096,7 @@ }, "BucketArn":{ "shape":"S3RegionalBucketArn", - "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

" + "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

" } } }, @@ -1101,7 +1114,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID that creates the job.

", + "documentation":"

The account ID that creates the job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -1139,7 +1152,7 @@ }, "RoleArn":{ "shape":"IAMRoleArn", - "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's action on every object in the manifest.

" + "documentation":"

The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) role that Batch Operations will use to run this job's action on every object in the manifest.

" }, "Tags":{ "shape":"S3TagSet", @@ -1220,7 +1233,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point whose policy you want to delete.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point whose policy you want to delete.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1242,7 +1255,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point you want to delete.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point you want to delete.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1264,7 +1277,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1286,7 +1299,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1308,7 +1321,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

Specifies the bucket being deleted.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket being deleted.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1323,14 +1336,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket tag set to be removed.

", + "documentation":"

The account ID of the Outposts bucket tag set to be removed.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket ARN that has the tag set to be removed.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

The bucket ARN that has the tag set to be removed.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1345,7 +1358,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", + "documentation":"

The account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -1369,7 +1382,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The account ID for the AWS account whose PublicAccessBlock configuration you want to remove.

", + "documentation":"

The account ID for the account whose PublicAccessBlock configuration you want to remove.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -1434,7 +1447,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", + "documentation":"

The account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -1456,6 +1469,11 @@ } } }, + "Endpoints":{ + "type":"map", + "key":{"shape":"NonEmptyMaxLength64String"}, + "value":{"shape":"NonEmptyMaxLength1024String"} + }, "ExceptionMessage":{ "type":"string", "max":1024, @@ -1613,7 +1631,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point whose policy you want to retrieve.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point whose policy you want to retrieve.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1703,7 +1721,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point whose configuration information you want to retrieve.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point whose configuration information you want to retrieve.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1726,12 +1744,24 @@ }, "VpcConfiguration":{ "shape":"VpcConfiguration", - "documentation":"

Contains the virtual private cloud (VPC) configuration for the specified access point.

This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other AWS services.

" + "documentation":"

Contains the virtual private cloud (VPC) configuration for the specified access point.

This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other Amazon Web Services.

" }, "PublicAccessBlockConfiguration":{"shape":"PublicAccessBlockConfiguration"}, "CreationDate":{ "shape":"CreationDate", "documentation":"

The date and time when the specified access point was created.

" + }, + "Alias":{ + "shape":"Alias", + "documentation":"

The name or alias of the access point.

" + }, + "AccessPointArn":{ + "shape":"S3AccessPointArn", + "documentation":"

The ARN of the access point.

" + }, + "Endpoints":{ + "shape":"Endpoints", + "documentation":"

The VPC endpoint for the access point.

" } } }, @@ -1744,14 +1774,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket.

", + "documentation":"

The account ID of the Outposts bucket.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1775,14 +1805,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket.

", + "documentation":"

The account ID of the Outposts bucket.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1806,14 +1836,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket.

", + "documentation":"

The account ID of the Outposts bucket.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1845,14 +1875,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket.

", + "documentation":"

The account ID of the Outposts bucket.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1877,7 +1907,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", + "documentation":"

The account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -1904,7 +1934,7 @@ "members":{ "PublicAccessBlockConfiguration":{ "shape":"PublicAccessBlockConfiguration", - "documentation":"

The PublicAccessBlock configuration currently in effect for this AWS account.

" + "documentation":"

The PublicAccessBlock configuration currently in effect for this account.

" } }, "payload":"PublicAccessBlockConfiguration" @@ -1915,7 +1945,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The account ID for the AWS account whose PublicAccessBlock configuration you want to retrieve.

", + "documentation":"

The account ID for the account whose PublicAccessBlock configuration you want to retrieve.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -2124,7 +2154,7 @@ }, "RoleArn":{ "shape":"IAMRoleArn", - "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role assigned to run the tasks for this job.

", + "documentation":"

The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) role assigned to run the tasks for this job.

", "box":true }, "SuspendedDate":{ @@ -2306,7 +2336,7 @@ "members":{ "LambdaInvoke":{ "shape":"LambdaInvokeOperation", - "documentation":"

Directs the specified job to invoke an AWS Lambda function on every object in the manifest.

", + "documentation":"

Directs the specified job to invoke a Lambda function on every object in the manifest.

", "box":true }, "S3PutObjectCopy":{ @@ -2463,7 +2493,7 @@ "members":{ "FunctionArn":{ "shape":"FunctionArnString", - "documentation":"

The Amazon Resource Name (ARN) for the AWS Lambda function that the specified job will invoke on every object in the manifest.

" + "documentation":"

The Amazon Resource Name (ARN) for the Lambda function that the specified job will invoke on every object in the manifest.

" } }, "documentation":"

Contains the configuration parameters for a Lambda Invoke operation.

" @@ -2615,14 +2645,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID for owner of the bucket whose access points you want to list.

", + "documentation":"

The account ID for the owner of the bucket whose access points you want to list.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket whose associated access points you want to list.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

The name of the bucket whose associated access points you want to list.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"querystring", "locationName":"bucket" }, @@ -2659,7 +2689,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", + "documentation":"

The account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -2704,7 +2734,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket.

", + "documentation":"

The account ID of the Outposts bucket.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -2723,7 +2753,7 @@ }, "OutpostId":{ "shape":"NonEmptyMaxLength64String", - "documentation":"

The ID of the AWS Outposts.

This is required by Amazon S3 on Outposts buckets.

", + "documentation":"

The ID of the Outposts.

This is required by Amazon S3 on Outposts buckets.

", "location":"header", "locationName":"x-amz-outpost-id" } @@ -2905,7 +2935,7 @@ "documentation":"

Specifies the ARN for the Object Lambda Access Point.

" } }, - "documentation":"

An access point with an attached AWS Lambda function used to access transformed data from an Amazon S3 bucket.

" + "documentation":"

An access point with an attached Lambda function used to access transformed data from an Amazon S3 bucket.

" }, "ObjectLambdaAccessPointArn":{ "type":"string", @@ -2971,7 +3001,7 @@ "members":{ "AwsLambda":{ "shape":"AwsLambdaTransformation", - "documentation":"

A container for an AWS Lambda function.

" + "documentation":"

A container for a Lambda function.

" } }, "documentation":"

A container for AwsLambdaTransformation.

", @@ -3093,7 +3123,7 @@ }, "RestrictPublicBuckets":{ "shape":"Setting", - "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to TRUE restricts access to buckets with public policies to only AWS service principals and authorized users within this account.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

This is not supported for Amazon S3 on Outposts.

", + "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to TRUE restricts access to buckets with public policies to only Amazon Web Service principals and authorized users within this account.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

This is not supported for Amazon S3 on Outposts.

", "locationName":"RestrictPublicBuckets" } }, @@ -3164,14 +3194,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID for owner of the bucket associated with the specified access point.

", + "documentation":"

The account ID for the owner of the bucket associated with the specified access point.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point that you want to associate with the specified policy.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point that you want to associate with the specified policy.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" }, @@ -3190,7 +3220,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket.

", + "documentation":"

The account ID of the Outposts bucket.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -3220,14 +3250,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket.

", + "documentation":"

The account ID of the Outposts bucket.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" }, @@ -3253,14 +3283,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the Outposts bucket.

", + "documentation":"

The account ID of the Outposts bucket.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" }, @@ -3283,7 +3313,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", + "documentation":"

The account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -3314,13 +3344,13 @@ "members":{ "PublicAccessBlockConfiguration":{ "shape":"PublicAccessBlockConfiguration", - "documentation":"

The PublicAccessBlock configuration that you want to apply to the specified AWS account.

", + "documentation":"

The PublicAccessBlock configuration that you want to apply to the specified account.

", "locationName":"PublicAccessBlockConfiguration", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "AccountId":{ "shape":"AccountId", - "documentation":"

The account ID for the AWS account whose PublicAccessBlock configuration you want to set.

", + "documentation":"

The account ID for the account whose PublicAccessBlock configuration you want to set.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -3417,7 +3447,7 @@ }, "OutpostId":{ "shape":"NonEmptyMaxLength64String", - "documentation":"

The AWS Outposts ID of the regional bucket.

" + "documentation":"

The Outposts ID of the regional bucket.

" } }, "documentation":"

The container for the regional bucket.

" @@ -3619,7 +3649,7 @@ }, "BucketKeyEnabled":{ "shape":"Boolean", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.

" + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Amazon Web Services KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.

" } }, "documentation":"

Contains the configuration parameters for a PUT Copy object operation. S3 Batch Operations passes every object to the underlying PUT Copy object API. For more information about the parameters for this operation, see PUT Object - Copy.

" @@ -3989,10 +4019,10 @@ "members":{ "Arn":{ "shape":"AwsOrgArn", - "documentation":"

A container for the Amazon Resource Name (ARN) of the AWS organization. This property is read-only and follows the following format: arn:aws:organizations:us-east-1:example-account-id:organization/o-ex2l495dck

" + "documentation":"

A container for the Amazon Resource Name (ARN) of the Amazon Web Services organization. This property is read-only and follows the following format: arn:aws:organizations:us-east-1:example-account-id:organization/o-ex2l495dck

" } }, - "documentation":"

The AWS organization for your S3 Storage Lens.

" + "documentation":"

The Amazon Web Services organization for your S3 Storage Lens.

" }, "StorageLensConfiguration":{ "type":"structure", @@ -4028,7 +4058,7 @@ }, "AwsOrg":{ "shape":"StorageLensAwsOrg", - "documentation":"

A container for the AWS organization for this S3 Storage Lens configuration.

" + "documentation":"

A container for the Amazon Web Services organization for this S3 Storage Lens configuration.

" }, "StorageLensArn":{ "shape":"StorageLensArn", @@ -4202,7 +4232,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", + "documentation":"

The account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -4248,7 +4278,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", + "documentation":"

The account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -4307,5 +4337,5 @@ "min":1 } }, - "documentation":"

AWS S3 Control provides access to Amazon S3 control plane actions.

" + "documentation":"

Amazon Web Services S3 Control provides access to Amazon S3 control plane actions.

" } diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index 2217ee6fa527..9fd4dc875490 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/s3outposts/src/main/resources/codegen-resources/service-2.json b/services/s3outposts/src/main/resources/codegen-resources/service-2.json index a344e3715111..942f9101bad8 100644 --- a/services/s3outposts/src/main/resources/codegen-resources/service-2.json +++ b/services/s3outposts/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

S3 on Outposts access points simplify managing data access at scale for shared datasets in Amazon S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC).

This action creates an endpoint and associates it with the specified Outpost.

Related actions include:

" + "documentation":"

Amazon S3 on Outposts Access Points simplify managing data access at scale for shared datasets in S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC). For more information, see Accessing S3 on Outposts using VPC only access points.

This action creates an endpoint and associates it with the specified Outposts.

It can take up to 5 minutes for this action to complete.

Related actions include:

" }, "DeleteEndpoint":{ "name":"DeleteEndpoint", @@ -43,7 +43,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

S3 on Outposts access points simplify managing data access at scale for shared datasets in Amazon S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC).

This action deletes an endpoint.

Related actions include:

" + "documentation":"

Amazon S3 on Outposts Access Points simplify managing data access at scale for shared datasets in S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC). For more information, see Accessing S3 on Outposts using VPC only access points.

This action deletes an endpoint.

It can take up to 5 minutes for this action to complete.

Related actions include:

" }, "ListEndpoints":{ "name":"ListEndpoints", @@ -59,7 +59,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

S3 on Outposts access points simplify managing data access at scale for shared datasets in Amazon S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC).

This action lists endpoints associated with the Outpost.

Related actions include:

" + "documentation":"

Amazon S3 on Outposts Access Points simplify managing data access at scale for shared datasets in S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC). For more information, see Accessing S3 on Outposts using VPC only access points.

This action lists endpoints associated with the Outposts.

Related actions include:

" } }, "shapes":{ @@ -72,11 +72,7 @@ "error":{"httpStatusCode":403}, "exception":true }, - "CidrBlock":{ - "type":"string", - "max":20, - "min":1 - }, + "CidrBlock":{"type":"string"}, "ConflictException":{ "type":"structure", "members":{ @@ -96,15 +92,23 @@ "members":{ "OutpostId":{ "shape":"OutpostId", - "documentation":"

The ID of the AWS Outpost.

" + "documentation":"

The ID of the AWS Outposts.

" }, "SubnetId":{ "shape":"SubnetId", - "documentation":"

The ID of the subnet in the selected VPC.

" + "documentation":"

The ID of the subnet in the selected VPC. The endpoint subnet must belong to the Outpost that has Amazon S3 on Outposts provisioned.

" }, "SecurityGroupId":{ "shape":"SecurityGroupId", "documentation":"

The ID of the security group to use with the endpoint.

" + }, + "AccessType":{ + "shape":"EndpointAccessType", + "documentation":"

The type of access for the on-premises network connectivity for the Outpost endpoint. To access the endpoint from an on-premises network, you must specify the access type and provide the customer-owned IPv4 pool.

" + }, + "CustomerOwnedIpv4Pool":{ + "shape":"CustomerOwnedIpv4Pool", + "documentation":"

The ID of the customer-owned IPv4 pool for the endpoint. IP addresses will be allocated from this pool for the endpoint.

" } } }, @@ -118,6 +122,10 @@ } }, "CreationTime":{"type":"timestamp"}, + "CustomerOwnedIpv4Pool":{ + "type":"string", + "pattern":"^ipv4pool-coip-([0-9a-f]{17})$" + }, "DeleteEndpointRequest":{ "type":"structure", "required":[ @@ -127,13 +135,13 @@ "members":{ "EndpointId":{ "shape":"EndpointId", - "documentation":"

The ID of the end point.

", + "documentation":"

The ID of the endpoint.

", "location":"querystring", "locationName":"endpointId" }, "OutpostId":{ "shape":"OutpostId", - "documentation":"

The ID of the AWS Outpost.

", + "documentation":"

The ID of the AWS Outposts.

", "location":"querystring", "locationName":"outpostId" } @@ -148,7 +156,7 @@ }, "OutpostsId":{ "shape":"OutpostId", - "documentation":"

The ID of the AWS Outpost.

" + "documentation":"

The ID of the AWS Outposts.

" }, "CidrBlock":{ "shape":"CidrBlock", @@ -165,27 +173,51 @@ "NetworkInterfaces":{ "shape":"NetworkInterfaces", "documentation":"

The network interface of the endpoint.

" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

The ID of the VPC used for the endpoint.

" + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

The ID of the subnet used for the endpoint.

" + }, + "SecurityGroupId":{ + "shape":"SecurityGroupId", + "documentation":"

The ID of the security group used for the endpoint.

" + }, + "AccessType":{ + "shape":"EndpointAccessType", + "documentation":"

" + }, + "CustomerOwnedIpv4Pool":{ + "shape":"CustomerOwnedIpv4Pool", + "documentation":"

The ID of the customer-owned IPv4 pool used for the endpoint.

" } }, - "documentation":"

S3 on Outposts access points simplify managing data access at scale for shared datasets in Amazon S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC).

" + "documentation":"

Amazon S3 on Outposts Access Points simplify managing data access at scale for shared datasets in S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC). For more information, see Accessing S3 on Outposts using VPC only access points.

" + }, + "EndpointAccessType":{ + "type":"string", + "enum":[ + "Private", + "CustomerOwnedIp" + ] }, "EndpointArn":{ "type":"string", - "max":500, - "min":5, "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):s3-outposts:[a-z\\-0-9]*:[0-9]{12}:outpost/(op-[a-f0-9]{17}|ec2)/endpoint/[a-zA-Z0-9]{19}$" }, "EndpointId":{ "type":"string", - "max":500, - "min":5, "pattern":"^[a-zA-Z0-9]{19}$" }, "EndpointStatus":{ "type":"string", "enum":[ - "PENDING", - "AVAILABLE" + "Pending", + "Available", + "Deleting" ] }, "Endpoints":{ @@ -225,7 +257,7 @@ "members":{ "Endpoints":{ "shape":"Endpoints", - "documentation":"

Returns an array of endpoints associated with AWS Outpost.

" + "documentation":"

Returns an array of endpoints associated with AWS Outposts.

" }, "NextToken":{ "shape":"NextToken", @@ -248,11 +280,7 @@ }, "documentation":"

The container for the network interface.

" }, - "NetworkInterfaceId":{ - "type":"string", - "max":100, - "min":1 - }, + "NetworkInterfaceId":{"type":"string"}, "NetworkInterfaces":{ "type":"list", "member":{"shape":"NetworkInterface"} @@ -265,8 +293,6 @@ }, "OutpostId":{ "type":"string", - "max":100, - "min":1, "pattern":"^(op-[a-f0-9]{17}|\\d{12}|ec2)$" }, "ResourceNotFoundException":{ @@ -280,14 +306,10 @@ }, "SecurityGroupId":{ "type":"string", - "max":100, - "min":1, "pattern":"^sg-([0-9a-f]{8}|[0-9a-f]{17})$" }, "SubnetId":{ "type":"string", - "max":100, - "min":1, "pattern":"^subnet-([0-9a-f]{8}|[0-9a-f]{17})$" }, "ValidationException":{ @@ -298,7 +320,8 @@ "documentation":"

There was an exception validating this data.

", "error":{"httpStatusCode":400}, "exception":true - } + }, + "VpcId":{"type":"string"} }, "documentation":"

Amazon S3 on Outposts provides access to S3 on Outposts operations.

" } diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index b91cffa8f24e..4cbf20b6bc5f 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index 7715cf02e584..bb11a5cb0db3 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -230,7 +230,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (Amazon Web Services SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your IAM user account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.

  • Option 1: For a full Amazon SageMaker access, search and attach the AmazonSageMakerFullAccess policy.

  • Option 2: For granting a limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:

    \"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]

    \"Resource\": [

    \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"

    \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"

    ]

    For more information, see Amazon SageMaker API Permissions: Actions, Permissions, and Resources Reference.

" + "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see the Create Endpoint example notebook.

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your IAM user account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.

  • Option 1: For a full Amazon SageMaker access, search and attach the AmazonSageMakerFullAccess policy.

  • Option 2: For granting a limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:

    \"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]

    \"Resource\": [

    \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"

    \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"

    ]

    For more information, see Amazon SageMaker API Permissions: Actions, Permissions, and Resources Reference.

" }, "CreateEndpointConfig":{ "name":"CreateEndpointConfig", @@ -3314,6 +3314,14 @@ "ml.m5.12xlarge", "ml.m5.16xlarge", "ml.m5.24xlarge", + "ml.m5d.large", + "ml.m5d.xlarge", + "ml.m5d.2xlarge", + "ml.m5d.4xlarge", + "ml.m5d.8xlarge", + "ml.m5d.12xlarge", + "ml.m5d.16xlarge", + "ml.m5d.24xlarge", "ml.c5.large", "ml.c5.xlarge", "ml.c5.2xlarge", @@ -3325,12 +3333,21 @@ "ml.p3.2xlarge", "ml.p3.8xlarge", "ml.p3.16xlarge", + "ml.p3dn.24xlarge", "ml.g4dn.xlarge", "ml.g4dn.2xlarge", "ml.g4dn.4xlarge", "ml.g4dn.8xlarge", "ml.g4dn.12xlarge", - "ml.g4dn.16xlarge" + "ml.g4dn.16xlarge", + "ml.r5.large", + "ml.r5.xlarge", + "ml.r5.2xlarge", + "ml.r5.4xlarge", + "ml.r5.8xlarge", + "ml.r5.12xlarge", + "ml.r5.16xlarge", + "ml.r5.24xlarge" ] }, "AppList":{ @@ -14040,6 +14057,20 @@ "max":2048, "pattern":"arn:aws[a-z\\-]*:lambda:[a-z0-9\\-]*:[0-9]{12}:function:.*" }, + "LambdaStepMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String256", + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution.

" + }, + "OutputParameters":{ + "shape":"OutputParameterList", + "documentation":"

A list of the output parameters of the Lambda step.

" + } + }, + "documentation":"

Metadata for a Lambda step.

" + }, "LastModifiedTime":{"type":"timestamp"}, "LineageEntityParameters":{ "type":"map", @@ -19178,17 +19209,24 @@ }, "Model":{ "shape":"ModelStepMetadata", - "documentation":"

Metadata for the Model step.

" + "documentation":"

The Amazon Resource Name (ARN) of the model that was created by this step execution.

" }, "RegisterModel":{ "shape":"RegisterModelStepMetadata", - "documentation":"

Metadata for the RegisterModel step.

" + "documentation":"

The Amazon Resource Name (ARN) of the model package the model was registered to by this step execution.

" }, "Condition":{ "shape":"ConditionStepMetadata", - "documentation":"

If this is a Condition step metadata object, details on the condition.

" + "documentation":"

The outcome of the condition evaluation that was run by this step execution.

" + }, + "Callback":{ + "shape":"CallbackStepMetadata", + "documentation":"

The URL of the Amazon SQS queue used by this step execution, the pipeline generated token, and a list of output parameters.

" }, - "Callback":{"shape":"CallbackStepMetadata"} + "Lambda":{ + "shape":"LambdaStepMetadata", + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution and a list of output parameters.

" + } }, "documentation":"

Metadata for a step execution.

" }, @@ -21487,14 +21525,14 @@ "members":{ "MaxRuntimeInSeconds":{ "shape":"MaxRuntimeInSeconds", - "documentation":"

The maximum length of time, in seconds, that a training or compilation job can run. If the job does not complete during this time, Amazon SageMaker ends the job.

When RetryStrategy is specified in the job request, MaxRuntimeInSeconds specifies the maximum time for all of the attempts in total, not each individual attempt.

The default value is 1 day. The maximum value is 28 days.

" + "documentation":"

The maximum length of time, in seconds, that a training or compilation job can run.

For compilation jobs, if the job does not complete during this time, you will receive a TimeOut error. We recommend starting with 900 seconds and increase as necessary based on your model.

For all other jobs, if the job does not complete during this time, Amazon SageMaker ends the job. When RetryStrategy is specified in the job request, MaxRuntimeInSeconds specifies the maximum time for all of the attempts in total, not each individual attempt. The default value is 1 day. The maximum value is 28 days.

" }, "MaxWaitTimeInSeconds":{ "shape":"MaxWaitTimeInSeconds", "documentation":"

The maximum length of time, in seconds, that a managed Spot training job has to complete. It is the amount of time spent waiting for Spot capacity plus the amount of time the job can run. It must be equal to or greater than MaxRuntimeInSeconds. If the job does not complete during this time, Amazon SageMaker ends the job.

When RetryStrategy is specified in the job request, MaxWaitTimeInSeconds specifies the maximum time for all of the attempts in total, not each individual attempt.

" } }, - "documentation":"

Specifies a limit to how long a model training job, model compilation job, or hyperparameter tuning job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

" + "documentation":"

Specifies a limit to how long a model training job, model compilation job, or hyperparameter tuning job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a training job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

" }, "String":{"type":"string"}, "String1024":{ diff --git a/services/sagemaker/src/main/resources/codegen-resources/waiters-2.json b/services/sagemaker/src/main/resources/codegen-resources/waiters-2.json index c462ff9ca9bf..8677924da15d 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/waiters-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/waiters-2.json @@ -188,6 +188,124 @@ "state": "failure" } ] + }, + "ImageCreated": { + "delay": 60, + "maxAttempts": 60, + "operation": "DescribeImage", + "acceptors": [ + { + "expected": "CREATED", + "matcher": "path", + "state": "success", + "argument": "ImageStatus" + }, + { + "expected": "CREATE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "ImageStatus" + }, + { + "expected": "ValidationException", + "matcher": "error", + "state": "failure" + } + ] + }, + "ImageUpdated": { + "delay": 60, + "maxAttempts": 60, + "operation": "DescribeImage", + "acceptors": [ + { + "expected": "CREATED", + "matcher": "path", + "state": "success", + "argument": "ImageStatus" + }, + { + "expected": "UPDATE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "ImageStatus" + }, + { + "expected": "ValidationException", + "matcher": "error", + "state": "failure" + } + ] + }, + "ImageDeleted": { + "delay": 60, + "maxAttempts": 60, + "operation": "DescribeImage", + "acceptors": [ + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + }, + { + "expected": "DELETE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "ImageStatus" + }, + { + "expected": "ValidationException", + "matcher": "error", + "state": "failure" + } + ] + }, + "ImageVersionCreated": { + "delay": 60, + "maxAttempts": 60, + "operation": "DescribeImageVersion", + "acceptors": [ + { + "expected": "CREATED", + "matcher": "path", + "state": "success", + "argument": "ImageVersionStatus" + }, + { + "expected": "CREATE_FAILED", + "matcher": "path", + "state": 
"failure", + "argument": "ImageVersionStatus" + }, + { + "expected": "ValidationException", + "matcher": "error", + "state": "failure" + } + ] + }, + "ImageVersionDeleted": { + "delay": 60, + "maxAttempts": 60, + "operation": "DescribeImageVersion", + "acceptors": [ + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + }, + { + "expected": "DELETE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "ImageVersionStatus" + }, + { + "expected": "ValidationException", + "matcher": "error", + "state": "failure" + } + ] } } } diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index 8b5e5c76ba63..3d24e5d69d15 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index bb3bb319b363..eaa6338c7d66 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index 6a53f689f850..c52fcb3e4172 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store Runtime diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index 6c309ca7dd98..00b6698ff130 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 
2.17.16-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index a5df509d4875..f4beeaac3510 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/savingsplans/src/main/resources/codegen-resources/customization.config b/services/savingsplans/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..89f59320725a --- /dev/null +++ b/services/savingsplans/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "customServiceMetadata": {"contentType" : "application/json"} +} \ No newline at end of file diff --git a/services/savingsplans/src/main/resources/codegen-resources/service-2.json b/services/savingsplans/src/main/resources/codegen-resources/service-2.json index e8fb4273baf7..4ddcf8c45eb6 100644 --- a/services/savingsplans/src/main/resources/codegen-resources/service-2.json +++ b/services/savingsplans/src/main/resources/codegen-resources/service-2.json @@ -1134,7 +1134,7 @@ "TermDurationInSeconds":{"type":"long"}, "UUID":{ "type":"string", - "pattern":"^(([0-9a-f]+)(-?))+$" + "pattern":"[a-f0-9]+(-[a-f0-9]+)*" }, "UUIDs":{ "type":"list", diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index e7ec868e5383..3126569eb9b8 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index 84d0625c1920..f91d25535c92 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 
secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/secretsmanager/src/main/resources/codegen-resources/service-2.json b/services/secretsmanager/src/main/resources/codegen-resources/service-2.json index 5b572e32ad23..76da28c02534 100644 --- a/services/secretsmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/secretsmanager/src/main/resources/codegen-resources/service-2.json @@ -48,7 +48,7 @@ {"shape":"InternalServiceError"}, {"shape":"PreconditionNotMetException"} ], - "documentation":"

Creates a new secret. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

Secrets Manager stores the encrypted secret data in one of a collection of \"versions\" associated with the secret. Each version contains a copy of the encrypted secret data. Each version is associated with one or more \"staging labels\" that identify where the version is in the rotation cycle. The SecretVersionsToStages field of the secret contains the mapping of staging labels to the active versions of the secret. Versions without a staging label are considered deprecated and not included in the list.

You provide the secret data to be encrypted by putting text in either the SecretString parameter or binary data in the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager also creates an initial secret version and automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS creating the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CreateSecret

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account default AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account default AWS managed CMK for Secrets Manager.

  • secretsmanager:TagResource - needed only if you include the Tags parameter.

Related operations

  • To delete a secret, use DeleteSecret.

  • To modify an existing secret, use UpdateSecret.

  • To create a new version of a secret, use PutSecretValue.

  • To retrieve the encrypted secure string and secure binary values, use GetSecretValue.

  • To retrieve all other details for a secret, use DescribeSecret. This does not include the encrypted secure string and secure binary values.

  • To retrieve the list of secret versions associated with the current secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" + "documentation":"

Creates a new secret. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

Secrets Manager stores the encrypted secret data in one of a collection of \"versions\" associated with the secret. Each version contains a copy of the encrypted secret data. Each version is associated with one or more \"staging labels\" that identify where the version is in the rotation cycle. The SecretVersionsToStages field of the secret contains the mapping of staging labels to the active versions of the secret. Versions without a staging label are considered deprecated and not included in the list.

You provide the secret data to be encrypted by putting text in either the SecretString parameter or binary data in the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager also creates an initial secret version and automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an Amazon Web Services KMS encryption key, Secrets Manager uses the account's default Amazon Web Services managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same Amazon Web Services account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in Amazon Web Services creating the account's Amazon Web Services-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different Amazon Web Services account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom Amazon Web Services KMS CMK because you can't access the default CMK for the account using credentials from a different Amazon Web Services account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the Amazon Web Services KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CreateSecret

  • kms:GenerateDataKey - needed only if you use a customer-managed Amazon Web Services KMS key to encrypt the secret. You do not need this permission to use the account default Amazon Web Services managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a customer-managed Amazon Web Services KMS key to encrypt the secret. You do not need this permission to use the account default Amazon Web Services managed CMK for Secrets Manager.

  • secretsmanager:TagResource - needed only if you include the Tags parameter.

Related operations

  • To delete a secret, use DeleteSecret.

  • To modify an existing secret, use UpdateSecret.

  • To create a new version of a secret, use PutSecretValue.

  • To retrieve the encrypted secure string and secure binary values, use GetSecretValue.

  • To retrieve all other details for a secret, use DescribeSecret. This does not include the encrypted secure string and secure binary values.

  • To retrieve the list of secret versions associated with the current secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", @@ -92,9 +92,10 @@ "output":{"shape":"DescribeSecretResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceError"} + {"shape":"InternalServiceError"}, + {"shape":"InvalidParameterException"} ], - "documentation":"

Retrieves the details of a secret. It does not include the encrypted fields. Secrets Manager only returns fields populated with a value in the response.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:DescribeSecret

Related operations

" + "documentation":"

Retrieves the details of a secret. It does not include the encrypted fields. Secrets Manager only returns fields populated with a value in the response.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:DescribeSecret

Related operations

  • To create a secret, use CreateSecret.

  • To modify a secret, use UpdateSecret.

  • To retrieve the encrypted secret information in a version of the secret, use GetSecretValue.

  • To list all of the secrets in the Amazon Web Services account, use ListSecrets.

" }, "GetRandomPassword":{ "name":"GetRandomPassword", @@ -122,7 +123,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"}, - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"} ], "documentation":"

Retrieves the JSON text of the resource-based policy document attached to the specified secret. The JSON request string input and response output displays formatted code with white space and line breaks for better readability. Submit your input as a single line JSON string.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetResourcePolicy

Related operations

" }, @@ -141,7 +143,7 @@ {"shape":"DecryptionFailure"}, {"shape":"InternalServiceError"} ], - "documentation":"

Retrieves the contents of the encrypted fields SecretString or SecretBinary from the specified version of a secret, whichever contains content.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetSecretValue

  • kms:Decrypt - required only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

  • To create a new version of the secret with different encrypted information, use PutSecretValue.

  • To retrieve the non-encrypted details for the secret, use DescribeSecret.

" + "documentation":"

Retrieves the contents of the encrypted fields SecretString or SecretBinary from the specified version of a secret, whichever contains content.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetSecretValue

  • kms:Decrypt - required only if you use a customer-managed Amazon Web Services KMS key to encrypt the secret. You do not need this permission to use the account's default Amazon Web Services managed CMK for Secrets Manager.

Related operations

  • To create a new version of the secret with different encrypted information, use PutSecretValue.

  • To retrieve the non-encrypted details for the secret, use DescribeSecret.

" }, "ListSecretVersionIds":{ "name":"ListSecretVersionIds", @@ -154,7 +156,8 @@ "errors":[ {"shape":"InvalidNextTokenException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceError"} + {"shape":"InternalServiceError"}, + {"shape":"InvalidParameterException"} ], "documentation":"

Lists all of the versions attached to the specified secret. The output does not include the SecretString or SecretBinary fields. By default, the list includes only versions that have at least one staging label in VersionStage attached.

Always check the NextToken response parameter when calling any of the List* operations. These operations can occasionally return an empty or shorter than expected list of results even when more results become available. When this happens, the NextToken response parameter contains a value to pass to the next call to the same API to request the next part of the list.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:ListSecretVersionIds

Related operations

" }, @@ -171,7 +174,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Lists all of the secrets that are stored by Secrets Manager in the AWS account. To list the versions currently stored for a specific secret, use ListSecretVersionIds. The encrypted fields SecretString and SecretBinary are not included in the output. To get that information, call the GetSecretValue operation.

Always check the NextToken response parameter when calling any of the List* operations. These operations can occasionally return an empty or shorter than expected list of results even when there more results become available. When this happens, the NextToken response parameter contains a value to pass to the next call to the same API to request the next part of the list.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:ListSecrets

Related operations

" + "documentation":"

Lists all of the secrets that are stored by Secrets Manager in the Amazon Web Services account. To list the versions currently stored for a specific secret, use ListSecretVersionIds. The encrypted fields SecretString and SecretBinary are not included in the output. To get that information, call the GetSecretValue operation.

Always check the NextToken response parameter when calling any of the List* operations. These operations can occasionally return an empty or shorter than expected list of results even when more results become available. When this happens, the NextToken response parameter contains a value to pass to the next call to the same API to request the next part of the list.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:ListSecrets

Related operations

" }, "PutResourcePolicy":{ "name":"PutResourcePolicy", @@ -189,7 +192,7 @@ {"shape":"InvalidRequestException"}, {"shape":"PublicPolicyException"} ], - "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager. For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutResourcePolicy

Related operations

" + "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for Amazon Web Services Secrets Manager. For the complete description of the Amazon Web Services policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutResourcePolicy

Related operations

" }, "PutSecretValue":{ "name":"PutSecretValue", @@ -208,7 +211,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If this operation creates the first version for the secret then Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you do not specify a value for VersionStages then Secrets Manager automatically moves the staging label AWSCURRENT to this new version.

  • If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

  • This operation is idempotent. If a version with a VersionId with the same value as the ClientRequestToken parameter already exists and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you cannot modify an existing version; you can only create new ones.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS creating the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutSecretValue

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

" + "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the Amazon Web Services CLI or one of the Amazon Web Services SDKs.

  • If this operation creates the first version for the secret then Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you do not specify a value for VersionStages then Secrets Manager automatically moves the staging label AWSCURRENT to this new version.

  • If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

  • This operation is idempotent. If a version with a VersionId with the same value as the ClientRequestToken parameter already exists and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you cannot modify an existing version; you can only create new ones.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an Amazon Web Services KMS encryption key, Secrets Manager uses the account's default Amazon Web Services managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same Amazon Web Services account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in Amazon Web Services creating the account's Amazon Web Services-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different Amazon Web Services account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom Amazon Web Services KMS CMK because you can't access the default CMK for the account using credentials from a different Amazon Web Services account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the Amazon Web Services KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutSecretValue

  • kms:GenerateDataKey - needed only if you use a customer-managed Amazon Web Services KMS key to encrypt the secret. You do not need this permission to use the account's default Amazon Web Services managed CMK for Secrets Manager.

Related operations

" }, "RemoveRegionsFromReplication":{ "name":"RemoveRegionsFromReplication", @@ -272,7 +275,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide.

Secrets Manager schedules the next rotation when the previous one completes. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

The rotation function must end with the versions of the secret in one of two states:

  • The AWSPENDING and AWSCURRENT staging labels are attached to the same version of the secret, or

  • The AWSPENDING staging label is not attached to any version of the secret.

If the AWSPENDING staging label is present but not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:RotateSecret

  • lambda:InvokeFunction (on the function specified in the secret's metadata)

Related operations

" + "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an Amazon Web Services Lambda function and, optionally, the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in Amazon Web Services Secrets Manager in the Amazon Web Services Secrets Manager User Guide.

Secrets Manager schedules the next rotation when the previous one completes. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

The rotation function must end with the versions of the secret in one of two states:

  • The AWSPENDING and AWSCURRENT staging labels are attached to the same version of the secret, or

  • The AWSPENDING staging label is not attached to any version of the secret.

If the AWSPENDING staging label is present but not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:RotateSecret

  • lambda:InvokeFunction (on the function specified in the secret's metadata)

Related operations

" }, "StopReplicationToReplica":{ "name":"StopReplicationToReplica", @@ -303,7 +306,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Attaches one or more tags, each consisting of a key name and a value, to the specified secret. Tags are part of the secret's overall metadata, and are not associated with any specific version of the secret. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because AWS reserves it for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If you use your tagging schema across multiple services and resources, remember other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:TagResource

Related operations

  • To remove one or more tags from the collection attached to a secret, use UntagResource.

  • To view the list of tags attached to a secret, use DescribeSecret.

" + "documentation":"

Attaches one or more tags, each consisting of a key name and a value, to the specified secret. Tags are part of the secret's overall metadata, and are not associated with any specific version of the secret. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because Amazon Web Services reserves it for Amazon Web Services use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If you use your tagging schema across multiple services and resources, remember other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:TagResource

Related operations

  • To remove one or more tags from the collection attached to a secret, use UntagResource.

  • To view the list of tags attached to a secret, use DescribeSecret.

" }, "UntagResource":{ "name":"UntagResource", @@ -339,7 +342,7 @@ {"shape":"InternalServiceError"}, {"shape":"PreconditionNotMetException"} ], - "documentation":"

Modifies many of the details of the specified secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

  • If a version with a VersionId with the same value as the ClientRequestToken parameter already exists, the operation results in an error. You cannot modify an existing version, you can only create a new version.

  • If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS creating the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecret

  • kms:GenerateDataKey - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

Related operations

" + "documentation":"

Modifies many of the details of the specified secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the Amazon Web Services CLI or one of the Amazon Web Services SDKs.

  • If a version with a VersionId with the same value as the ClientRequestToken parameter already exists, the operation results in an error. You cannot modify an existing version, you can only create a new version.

  • If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an Amazon Web Services KMS encryption key, Secrets Manager uses the account's default Amazon Web Services managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same Amazon Web Services account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in Amazon Web Services creating the account's Amazon Web Services-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different Amazon Web Services account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom Amazon Web Services KMS CMK because you can't access the default CMK for the account using credentials from a different Amazon Web Services account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the Amazon Web Services KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecret

  • kms:GenerateDataKey - needed only if you use a custom Amazon Web Services KMS key to encrypt the secret. You do not need this permission to use the account's Amazon Web Services managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a custom Amazon Web Services KMS key to encrypt the secret. You do not need this permission to use the account's Amazon Web Services managed CMK for Secrets Manager.

Related operations

" }, "UpdateSecretVersionStage":{ "name":"UpdateSecretVersionStage", @@ -356,7 +359,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Modifies the staging labels attached to a version of a secret. Staging labels are used to track a version as it progresses through the secret rotation process. You can attach a staging label to only one version of a secret at a time. If a staging label to be added is already attached to another version, then it is moved--removed from the other version first and then attached to this one. For more information about staging labels, see Staging Labels in the AWS Secrets Manager User Guide.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels--they don't replace it.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecretVersionStage

Related operations

  • To get the list of staging labels that are currently associated with a version of a secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" + "documentation":"

Modifies the staging labels attached to a version of a secret. Staging labels are used to track a version as it progresses through the secret rotation process. You can attach a staging label to only one version of a secret at a time. If a staging label to be added is already attached to another version, then it is moved--removed from the other version first and then attached to this one. For more information about staging labels, see Staging Labels in the Amazon Web Services Secrets Manager User Guide.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels--they don't replace it.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecretVersionStage

Related operations

  • To get the list of staging labels that are currently associated with a version of a secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" }, "ValidateResourcePolicy":{ "name":"ValidateResourcePolicy", @@ -430,7 +433,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include the value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and the version SecretString and SecretBinary values are the same as those in the request, then the request is ignored.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request, then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.

This value becomes the VersionId of the new version.

", + "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include the value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and the version SecretString and SecretBinary values are the same as those in the request, then the request is ignored.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request, then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "Description":{ @@ -439,19 +442,19 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

(Optional) Specifies the ARN, Key ID, or alias of the AWS KMS customer master key (CMK) to be used to encrypt the SecretString or SecretBinary values in the versions stored in this secret.

You can specify any of the supported ways to identify a AWS KMS key ID. If you need to reference a CMK in a different account, you can use only the key ARN or the alias ARN.

If you don't specify this value, then Secrets Manager defaults to using the AWS account's default CMK (the one named aws/secretsmanager). If a AWS KMS CMK with that name doesn't yet exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's SecretString or SecretBinary fields.

You can use the account default CMK to encrypt and decrypt only if you call this operation using credentials from the same account that owns the secret. If the secret resides in a different account, then you must create a custom CMK and specify the ARN in this field.

" + "documentation":"

(Optional) Specifies the ARN, Key ID, or alias of the Amazon Web Services KMS customer master key (CMK) to be used to encrypt the SecretString or SecretBinary values in the versions stored in this secret.

You can specify any of the supported ways to identify an Amazon Web Services KMS key ID. If you need to reference a CMK in a different account, you can use only the key ARN or the alias ARN.

If you don't specify this value, then Secrets Manager defaults to using the Amazon Web Services account's default CMK (the one named aws/secretsmanager). If an Amazon Web Services KMS CMK with that name doesn't yet exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's SecretString or SecretBinary fields.

You can use the account default CMK to encrypt and decrypt only if you call this operation using credentials from the same account that owns the secret. If the secret resides in a different account, then you must create a custom CMK and specify the ARN in this field.

" }, "SecretBinary":{ "shape":"SecretBinaryType", - "documentation":"

(Optional) Specifies binary data that you want to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then use the appropriate technique for your tool to pass the contents of the file as a parameter.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

This parameter is not available using the Secrets Manager console. It can be accessed only by using the AWS CLI or one of the AWS SDKs.

" + "documentation":"

(Optional) Specifies binary data that you want to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then use the appropriate technique for your tool to pass the contents of the file as a parameter.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

This parameter is not available using the Secrets Manager console. It can be accessed only by using the Amazon Web Services CLI or one of the Amazon Web Services SDKs.

" }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

{\"username\":\"bob\",\"password\":\"abc123xyz456\"}

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" + "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the CLI User Guide. For example:

{\"username\":\"bob\",\"password\":\"abc123xyz456\"}

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" }, "Tags":{ "shape":"TagListType", - "documentation":"

(Optional) Specifies a list of user-defined tags that are attached to the secret. Each tag is a \"Key\" and \"Value\" pair of strings. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

  • Secrets Manager tag key names are case sensitive. A tag with the key \"ABC\" is a different tag from one with key \"abc\".

  • If you check tags in IAM policy Condition elements as part of your security strategy, then adding or removing a tag can change permissions. If the successful completion of this operation would result in you losing your permissions for this secret, then this operation is blocked and returns an Access Denied error.

This parameter requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"Key\":\"CostCenter\",\"Value\":\"12345\"},{\"Key\":\"environment\",\"Value\":\"production\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because AWS reserves it for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If you use your tagging schema across multiple services and resources, remember other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

" + "documentation":"

(Optional) Specifies a list of user-defined tags that are attached to the secret. Each tag is a \"Key\" and \"Value\" pair of strings. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

  • Secrets Manager tag key names are case sensitive. A tag with the key \"ABC\" is a different tag from one with key \"abc\".

  • If you check tags in IAM policy Condition elements as part of your security strategy, then adding or removing a tag can change permissions. If the successful completion of this operation would result in you losing your permissions for this secret, then this operation is blocked and returns an Access Denied error.

This parameter requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the CLI User Guide. For example:

[{\"Key\":\"CostCenter\",\"Value\":\"12345\"},{\"Key\":\"environment\",\"Value\":\"production\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because Amazon Web Services reserves it for Amazon Web Services use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If you use your tagging schema across multiple services and resources, remember other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

" }, "AddReplicaRegions":{ "shape":"AddReplicaRegionListType", @@ -531,7 +534,7 @@ }, "ForceDeleteWithoutRecovery":{ "shape":"BooleanType", - "documentation":"

(Optional) Specifies that the secret is to be deleted without any recovery window. You can't use both this parameter and the RecoveryWindowInDays parameter in the same API call.

An asynchronous background process performs the actual deletion, so there can be a short delay before the operation completes. If you write code to delete and then immediately recreate a secret with the same name, ensure that your code includes appropriate back off and retry logic.

Use this parameter with caution. This parameter causes the operation to skip the normal waiting period before the permanent deletion that AWS would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithouRecovery parameter, then you have no opportunity to recover the secret. You lose the secret permanently.

If you use this parameter and include a previously deleted or nonexistent secret, the operation does not return the error ResourceNotFoundException in order to correctly handle retries.

", + "documentation":"

(Optional) Specifies that the secret is to be deleted without any recovery window. You can't use both this parameter and the RecoveryWindowInDays parameter in the same API call.

An asynchronous background process performs the actual deletion, so there can be a short delay before the operation completes. If you write code to delete and then immediately recreate a secret with the same name, ensure that your code includes appropriate back off and retry logic.

Use this parameter with caution. This parameter causes the operation to skip the normal waiting period before the permanent deletion that Amazon Web Services would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithoutRecovery parameter, then you have no opportunity to recover the secret. You lose the secret permanently.

If you use this parameter and include a previously deleted or nonexistent secret, the operation does not return the error ResourceNotFoundException in order to correctly handle retries.

", "box":true } } @@ -583,7 +586,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

The ARN or alias of the AWS KMS customer master key (CMK) that's used to encrypt the SecretString or SecretBinary fields in each version of the secret. If you don't provide a key, then Secrets Manager defaults to encrypting the secret fields with the default AWS KMS CMK (the one named awssecretsmanager) for this account.

" + "documentation":"

The ARN or alias of the Amazon Web Services KMS customer master key (CMK) that's used to encrypt the SecretString or SecretBinary fields in each version of the secret. If you don't provide a key, then Secrets Manager defaults to encrypting the secret fields with the default Amazon Web Services KMS CMK (the one named aws/secretsmanager) for this account.

" }, "RotationEnabled":{ "shape":"RotationEnabledType", @@ -784,7 +787,7 @@ }, "ResourcePolicy":{ "shape":"NonEmptyResourcePolicyType", - "documentation":"

A JSON-formatted string that describes the permissions that are associated with the attached secret. These permissions are combined with any permissions that are associated with the user or role that attempts to access this secret. The combined permissions specify who can access the secret and what actions they can perform. For more information, see Authentication and Access Control for AWS Secrets Manager in the AWS Secrets Manager User Guide.

" + "documentation":"

A JSON-formatted string that describes the permissions that are associated with the attached secret. These permissions are combined with any permissions that are associated with the user or role that attempts to access this secret. The combined permissions specify who can access the secret and what actions they can perform. For more information, see Authentication and Access Control for Amazon Web Services Secrets Manager in the Amazon Web Services Secrets Manager User Guide.

" } } }, @@ -874,6 +877,10 @@ "documentation":"

You provided a parameter value that is not valid for the current state of the resource.

Possible causes:

  • You tried to perform the operation on a secret that's currently marked deleted.

  • You tried to enable rotation on a secret that doesn't already have a Lambda function ARN configured and you didn't include such an ARN as a parameter in this call.

", "exception":true }, + "KmsKeyIdListType":{ + "type":"list", + "member":{"shape":"KmsKeyIdType"} + }, "KmsKeyIdType":{ "type":"string", "max":2048, @@ -1037,7 +1044,7 @@ }, "ResourcePolicy":{ "shape":"NonEmptyResourcePolicyType", - "documentation":"

A JSON-formatted string constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" + "documentation":"

A JSON-formatted string constructed according to the grammar and syntax for an Amazon Web Services resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the CLI User Guide.

" }, "BlockPublicPolicy":{ "shape":"BooleanType", @@ -1069,7 +1076,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and the version of the SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing secret version. You can only create new versions to store new secret values.

This value becomes the VersionId of the new version.

", + "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret.

If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and the version of the SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing secret version. You can only create new versions to store new secret values.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "SecretBinary":{ @@ -1078,7 +1085,7 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" + "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the CLI User Guide.

For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" }, "VersionStages":{ "shape":"SecretVersionStagesType", @@ -1283,7 +1290,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You only need to specify your own value if you implement your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the function's processing. This value becomes the VersionId of the new version.

", + "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret that helps ensure idempotency.

If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You only need to specify your own value if you implement your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the function's processing. This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "RotationLambdaARN":{ @@ -1352,7 +1359,7 @@ "members":{ "ARN":{ "shape":"SecretARNType", - "documentation":"

The Amazon Resource Name (ARN) of the secret.

For more information about ARNs in Secrets Manager, see Policy Resources in the AWS Secrets Manager User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the secret.

For more information about ARNs in Secrets Manager, see Policy Resources in the Amazon Web Services Secrets Manager User Guide.

" }, "Name":{ "shape":"SecretNameType", @@ -1364,7 +1371,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

The ARN or alias of the AWS KMS customer master key (CMK) used to encrypt the SecretString and SecretBinary fields in each version of the secret. If you don't provide a key, then Secrets Manager defaults to encrypting the secret fields with the default KMS CMK, the key named awssecretsmanager, for this account.

" + "documentation":"

The ARN or alias of the Amazon Web Services KMS customer master key (CMK) used to encrypt the SecretString and SecretBinary fields in each version of the secret. If you don't provide a key, then Secrets Manager defaults to encrypting the secret fields with the default KMS CMK, the key named aws/secretsmanager, for this account.

" }, "RotationEnabled":{ "shape":"RotationEnabledType", @@ -1373,7 +1380,7 @@ }, "RotationLambdaARN":{ "shape":"RotationLambdaARNType", - "documentation":"

The ARN of an AWS Lambda function invoked by Secrets Manager to rotate and expire the secret either automatically per the schedule or manually by a call to RotateSecret.

" + "documentation":"

The ARN of an Amazon Web Services Lambda function invoked by Secrets Manager to rotate and expire the secret either automatically per the schedule or manually by a call to RotateSecret.

" }, "RotationRules":{ "shape":"RotationRulesType", @@ -1473,6 +1480,10 @@ "shape":"CreatedDateType", "documentation":"

The date and time this version of the secret was created.

", "box":true + }, + "KmsKeyIds":{ + "shape":"KmsKeyIdListType", + "documentation":"

The KMS keys used to encrypt the secret version.

" } }, "documentation":"

A structure that contains information about one version of a secret.

" @@ -1565,7 +1576,7 @@ }, "Tags":{ "shape":"TagListType", - "documentation":"

The tags to attach to the secret. Each element in the list consists of a Key and a Value.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For the AWS CLI, you can also use the syntax: --Tags Key=\"Key1\",Value=\"Value1\" Key=\"Key2\",Value=\"Value2\"[,…]

" + "documentation":"

The tags to attach to the secret. Each element in the list consists of a Key and a Value.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the CLI User Guide. For the CLI, you can also use the syntax: --Tags Key=\"Key1\",Value=\"Value1\" Key=\"Key2\",Value=\"Value2\"[,…]

" } } }, @@ -1588,7 +1599,7 @@ }, "TagKeys":{ "shape":"TagKeyListType", - "documentation":"

A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" + "documentation":"

A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the CLI User Guide.

" } } }, @@ -1602,7 +1613,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) If you want to add a new version to the secret, this parameter specifies a unique identifier for the new version that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You typically only need to interact with this value if you implement your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from the request then an error occurs because you cannot modify an existing secret value.

This value becomes the VersionId of the new version.

", + "documentation":"

(Optional) If you want to add a new version to the secret, this parameter specifies a unique identifier for the new version that helps ensure idempotency.

If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You typically only need to interact with this value if you implement your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from the request then an error occurs because you cannot modify an existing secret value.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "Description":{ @@ -1611,7 +1622,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

(Optional) Specifies an updated ARN or alias of the AWS KMS customer master key (CMK) to be used to encrypt the protected text in new versions of this secret.

You can only use the account's default CMK to encrypt and decrypt if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and provide the ARN of that CMK in this field. The user making the call must have permissions to both the secret and the CMK in their respective accounts.

" + "documentation":"

(Optional) Specifies an updated ARN or alias of the Amazon Web Services KMS customer master key (CMK) to be used to encrypt the protected text in new versions of this secret.

You can only use the account's default CMK to encrypt and decrypt if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and provide the ARN of that CMK in this field. The user making the call must have permissions to both the secret and the CMK in their respective accounts.

" }, "SecretBinary":{ "shape":"SecretBinaryType", @@ -1619,7 +1630,7 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies updated text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text. You can also 'escape' the double quote character in the embedded JSON text by prefacing each with a backslash. For example, the following string is surrounded by double-quotes. All of the embedded double quotes are escaped:

\"[{\\\"username\\\":\\\"bob\\\"},{\\\"password\\\":\\\"abc123xyz456\\\"}]\"

" + "documentation":"

(Optional) Specifies updated text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text. You can also 'escape' the double quote character in the embedded JSON text by prefacing each with a backslash. For example, the following string is surrounded by double-quotes. All of the embedded double quotes are escaped:

\"[{\\\"username\\\":\\\"bob\\\"},{\\\"password\\\":\\\"abc123xyz456\\\"}]\"

" } } }, @@ -1690,7 +1701,7 @@ }, "ResourcePolicy":{ "shape":"NonEmptyResourcePolicyType", - "documentation":"

A JSON-formatted string constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.publi

" + "documentation":"

A JSON-formatted string constructed according to the grammar and syntax for an Amazon Web Services resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the CLI User Guide.

" } } }, @@ -1726,5 +1737,5 @@ "member":{"shape":"ValidationErrorsEntry"} } }, - "documentation":"AWS Secrets Manager API Reference

AWS Secrets Manager provides a service to enable you to store, manage, and retrieve, secrets.

This guide provides descriptions of the Secrets Manager API. For more information about using this service, see the AWS Secrets Manager User Guide.

API Version

This version of the Secrets Manager API Reference documents the Secrets Manager API version 2017-10-17.

As an alternative to using the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms such as Java, Ruby, .NET, iOS, and Android. The SDKs provide a convenient way to create programmatic access to AWS Secrets Manager. For example, the SDKs provide cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including downloading and installing them, see Tools for Amazon Web Services.

We recommend you use the AWS SDKs to make programmatic API calls to Secrets Manager. However, you also can use the Secrets Manager HTTP Query API to make direct calls to the Secrets Manager web service. To learn more about the Secrets Manager HTTP Query API, see Making Query Requests in the AWS Secrets Manager User Guide.

Secrets Manager API supports GET and POST requests for all actions, and doesn't require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Support and Feedback for AWS Secrets Manager

We welcome your feedback. Send your comments to awssecretsmanager-feedback@amazon.com, or post your feedback and questions in the AWS Secrets Manager Discussion Forum. For more information about the AWS Discussion Forums, see Forums Help.

How examples are presented

The JSON that AWS Secrets Manager expects as your request parameters and the service returns as a response to HTTP query requests contain single, long strings without line breaks or white space formatting. The JSON shown in the examples displays the code formatted with both line breaks and white space to improve readability. When example input parameters can also cause long strings extending beyond the screen, you can insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Logging API Requests

AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information that's collected by AWS CloudTrail, you can determine the requests successfully made to Secrets Manager, who made the request, when it was made, and so on. For more about AWS Secrets Manager and support for AWS CloudTrail, see Logging AWS Secrets Manager Events with AWS CloudTrail in the AWS Secrets Manager User Guide. To learn more about CloudTrail, including enabling it and find your log files, see the AWS CloudTrail User Guide.

" + "documentation":"Amazon Web Services Secrets Manager

Amazon Web Services Secrets Manager provides a service to enable you to store, manage, and retrieve secrets.

This guide provides descriptions of the Secrets Manager API. For more information about using this service, see the Amazon Web Services Secrets Manager User Guide.

API Version

This version of the Secrets Manager API Reference documents the Secrets Manager API version 2017-10-17.

As an alternative to using the API, you can use one of the Amazon Web Services SDKs, which consist of libraries and sample code for various programming languages and platforms such as Java, Ruby, .NET, iOS, and Android. The SDKs provide a convenient way to create programmatic access to Amazon Web Services Secrets Manager. For example, the SDKs provide support for cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including downloading and installing them, see Tools for Amazon Web Services.

We recommend you use the Amazon Web Services SDKs to make programmatic API calls to Secrets Manager. However, you also can use the Secrets Manager HTTP Query API to make direct calls to the Secrets Manager web service. To learn more about the Secrets Manager HTTP Query API, see Making Query Requests in the Amazon Web Services Secrets Manager User Guide.

Secrets Manager API supports GET and POST requests for all actions, and doesn't require you to use GET for some actions and POST for others. However, GET requests are subject to the size limitation of a URL. Therefore, for operations that require larger sizes, use a POST request.

Support and Feedback for Amazon Web Services Secrets Manager

We welcome your feedback. Send your comments to awssecretsmanager-feedback@amazon.com, or post your feedback and questions in the Amazon Web Services Secrets Manager Discussion Forum. For more information about the Amazon Web Services Discussion Forums, see Forums Help.

How examples are presented

The JSON that Amazon Web Services Secrets Manager expects as your request parameters and the service returns as a response to HTTP query requests contains single, long strings without line breaks or white space formatting. The JSON shown in the examples displays the code formatted with both line breaks and white space to improve readability. When example input parameters can also cause long strings extending beyond the screen, you can insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Logging API Requests

Amazon Web Services Secrets Manager supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information that's collected by Amazon Web Services CloudTrail, you can determine the requests successfully made to Secrets Manager, who made the request, when it was made, and so on. For more about Amazon Web Services Secrets Manager and support for Amazon Web Services CloudTrail, see Logging Amazon Web Services Secrets Manager Events with Amazon Web Services CloudTrail in the Amazon Web Services Secrets Manager User Guide. To learn more about CloudTrail, including enabling it and finding your log files, see the Amazon Web Services CloudTrail User Guide.

" } diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index c8ee8e82c569..b723214bb4ef 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index 2a6a7e8c9a4f..d6946a106156 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -62,7 +62,7 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Disables the standards specified by the provided StandardsSubscriptionArns.

For more information, see Security Standards section of the AWS Security Hub User Guide.

" + "documentation":"

Disables the standards specified by the provided StandardsSubscriptionArns.

For more information, see Security Standards section of the Security Hub User Guide.

" }, "BatchEnableStandards":{ "name":"BatchEnableStandards", @@ -78,7 +78,7 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Enables the standards specified by the provided StandardsArn. To obtain the ARN for a standard, use the DescribeStandards operation.

For more information, see the Security Standards section of the AWS Security Hub User Guide.

" + "documentation":"

Enables the standards specified by the provided StandardsArn. To obtain the ARN for a standard, use the DescribeStandards operation.

For more information, see the Security Standards section of the Security Hub User Guide.

" }, "BatchImportFindings":{ "name":"BatchImportFindings", @@ -110,7 +110,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account.

Updates from BatchUpdateFindings do not affect the value of UpdatedAt for a finding.

Administrator and member accounts can use BatchUpdateFindings to update the following finding fields and objects.

  • Confidence

  • Criticality

  • Note

  • RelatedFindings

  • Severity

  • Types

  • UserDefinedFields

  • VerificationState

  • Workflow

You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the AWS Security Hub User Guide.

" + "documentation":"

Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account.

Updates from BatchUpdateFindings do not affect the value of UpdatedAt for a finding.

Administrator and member accounts can use BatchUpdateFindings to update the following finding fields and objects.

  • Confidence

  • Criticality

  • Note

  • RelatedFindings

  • Severity

  • Types

  • UserDefinedFields

  • VerificationState

  • Workflow

You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the Security Hub User Guide.

" }, "CreateActionTarget":{ "name":"CreateActionTarget", @@ -161,7 +161,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the administrator account. If you are integrated with Organizations, then the administrator account is designated by the organization management account.

CreateMembers is always used to add accounts that are not organization members.

For accounts that are part of an organization, CreateMembers is only used in the following cases:

  • Security Hub is not configured to automatically add new accounts in an organization.

  • The account was disassociated or deleted in Security Hub.

This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation.

For accounts that are not organization members, you create the account association and then send an invitation to the member account. To send the invitation, you use the InviteMembers operation. If the account owner accepts the invitation, the account becomes a member account in Security Hub.

Accounts that are part of an organization do not receive an invitation. They automatically become a member account in Security Hub.

A permissions policy is added that permits the administrator account to view the findings generated in the member account. When Security Hub is enabled in a member account, the member account findings are also visible to the administrator account.

To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

" + "documentation":"

Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the administrator account. If you are integrated with Organizations, then the administrator account is designated by the organization management account.

CreateMembers is always used to add accounts that are not organization members.

For accounts that are managed using Organizations, CreateMembers is only used in the following cases:

  • Security Hub is not configured to automatically add new organization accounts.

  • The account was disassociated or deleted in Security Hub.

This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation.

For accounts that are not organization members, you create the account association and then send an invitation to the member account. To send the invitation, you use the InviteMembers operation. If the account owner accepts the invitation, the account becomes a member account in Security Hub.

Accounts that are managed using Organizations do not receive an invitation. They automatically become a member account in Security Hub, and Security Hub is automatically enabled for those accounts. Note that Security Hub cannot be enabled automatically for the organization management account. The organization management account must enable Security Hub before the administrator account enables it as a member account.

A permissions policy is added that permits the administrator account to view the findings generated in the member account. When Security Hub is enabled in a member account, the member account findings are also visible to the administrator account.

To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

" }, "DeclineInvitations":{ "name":"DeclineInvitations", @@ -227,7 +227,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Deletes invitations received by the AWS account to become a member account.

This operation is only used by accounts that are not part of an organization. Organization accounts do not receive invitations.

" + "documentation":"

Deletes invitations received by the Amazon Web Services account to become a member account.

This operation is only used by accounts that are not part of an organization. Organization accounts do not receive invitations.

" }, "DeleteMembers":{ "name":"DeleteMembers", @@ -492,7 +492,7 @@ {"shape":"ResourceConflictException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Enables Security Hub for your account in the current Region or the Region you specify in the request.

When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from other services that are integrated with Security Hub.

When you use the EnableSecurityHub operation to enable Security Hub, you also automatically enable the following standards.

  • CIS AWS Foundations

  • AWS Foundational Security Best Practices

You do not enable the Payment Card Industry Data Security Standard (PCI DSS) standard.

To not enable the automatically enabled standards, set EnableDefaultStandards to false.

After you enable Security Hub, to enable a standard, use the BatchEnableStandards operation. To disable a standard, use the BatchDisableStandards operation.

To learn more, see Setting Up AWS Security Hub in the AWS Security Hub User Guide.

" + "documentation":"

Enables Security Hub for your account in the current Region or the Region you specify in the request.

When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from other services that are integrated with Security Hub.

When you use the EnableSecurityHub operation to enable Security Hub, you also automatically enable the following standards.

  • CIS Amazon Web Services Foundations

  • Amazon Web Services Foundational Security Best Practices

You do not enable the Payment Card Industry Data Security Standard (PCI DSS) standard.

To not enable the automatically enabled standards, set EnableDefaultStandards to false.

After you enable Security Hub, to enable a standard, use the BatchEnableStandards operation. To disable a standard, use the BatchDisableStandards operation.

To learn more, see the setup information in the Security Hub User Guide.

" }, "GetAdministratorAccount":{ "name":"GetAdministratorAccount", @@ -644,7 +644,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Invites other AWS accounts to become member accounts for the Security Hub administrator account that the invitation is sent from.

This operation is only used to invite accounts that do not belong to an organization. Organization accounts do not receive invitations.

Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated from the member account.

" + "documentation":"

Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that the invitation is sent from.

This operation is only used to invite accounts that do not belong to an organization. Organization accounts do not receive invitations.

Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated from the member account.

" }, "ListEnabledProductsForImport":{ "name":"ListEnabledProductsForImport", @@ -675,7 +675,7 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Lists all Security Hub membership invitations that were sent to the current AWS account.

This operation is only used by accounts that are managed by invitation. Accounts that are managed using the integration with AWS Organizations do not receive invitations.

" + "documentation":"

Lists all Security Hub membership invitations that were sent to the current Amazon Web Services account.

This operation is only used by accounts that are managed by invitation. Accounts that are managed using the integration with Organizations do not receive invitations.

" }, "ListMembers":{ "name":"ListMembers", @@ -916,14 +916,14 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The ID of an AWS account.

" + "documentation":"

The ID of an Amazon Web Services account.

" }, "Email":{ "shape":"NonEmptyString", - "documentation":"

The email of an AWS account.

" + "documentation":"

The email of an Amazon Web Services account.

" } }, - "documentation":"

The details of an AWS account.

" + "documentation":"

The details of an Amazon Web Services account.

" }, "AccountDetailsList":{ "type":"list", @@ -958,7 +958,7 @@ "documentation":"

Included if ActionType is PORT_PROBE. Provides details about the port probe that was detected.

" } }, - "documentation":"

Provides details about one of the following actions that affects or that was taken on a resource:

  • A remote IP address issued an AWS API call

  • A DNS request was received

  • A remote IP address attempted to connect to an EC2 instance

  • A remote IP address attempted a port probe on an EC2 instance

" + "documentation":"

Provides details about one of the following actions that affects or that was taken on a resource:

  • A remote IP address issued an Amazon Web Services API call

  • A DNS request was received

  • A remote IP address attempted to connect to an EC2 instance

  • A remote IP address attempted a port probe on an EC2 instance

" }, "ActionLocalIpDetails":{ "type":"structure", @@ -1056,7 +1056,7 @@ "members":{ "AccountId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account identifier of the Security Hub administrator account.

" + "documentation":"

The Amazon Web Services account identifier of the Security Hub administrator account.

" }, "Status":{ "shape":"AdminStatus", @@ -1112,7 +1112,7 @@ }, "ServiceName":{ "shape":"NonEmptyString", - "documentation":"

The name of the AWS service that the API method belongs to.

" + "documentation":"

The name of the Amazon Web Services service that the API method belongs to.

" }, "CallerType":{ "shape":"NonEmptyString", @@ -1350,7 +1350,7 @@ }, "TracingEnabled":{ "shape":"Boolean", - "documentation":"

Indicates whether active tracing with AWS X-Ray is enabled for the stage.

" + "documentation":"

Indicates whether active tracing with X-Ray is enabled for the stage.

" }, "CreatedDate":{ "shape":"NonEmptyString", @@ -1556,7 +1556,7 @@ }, "InUseBy":{ "shape":"StringList", - "documentation":"

The list of ARNs for the AWS resources that use the certificate.

" + "documentation":"

The list of ARNs for the Amazon Web Services resources that use the certificate.

" }, "IssuedAt":{ "shape":"NonEmptyString", @@ -1592,7 +1592,7 @@ }, "RenewalSummary":{ "shape":"AwsCertificateManagerCertificateRenewalSummary", - "documentation":"

Information about the status of the AWS Certificate Manager managed renewal for the certificate. Provided only when the certificate type is AMAZON_ISSUED.

" + "documentation":"

Information about the status of the Certificate Manager managed renewal for the certificate. Provided only when the certificate type is AMAZON_ISSUED.

" }, "Serial":{ "shape":"NonEmptyString", @@ -1616,10 +1616,10 @@ }, "Type":{ "shape":"NonEmptyString", - "documentation":"

The source of the certificate. For certificates that AWS Certificate Manager provides, Type is AMAZON_ISSUED. For certificates that are imported with ImportCertificate, Type is IMPORTED.

Valid values: IMPORTED | AMAZON_ISSUED | PRIVATE

" + "documentation":"

The source of the certificate. For certificates that Certificate Manager provides, Type is AMAZON_ISSUED. For certificates that are imported with ImportCertificate, Type is IMPORTED.

Valid values: IMPORTED | AMAZON_ISSUED | PRIVATE

" } }, - "documentation":"

Provides details about an AWS Certificate Manager certificate.

" + "documentation":"

Provides details about an Certificate Manager certificate.

" }, "AwsCertificateManagerCertificateDomainValidationOption":{ "type":"structure", @@ -1634,11 +1634,11 @@ }, "ValidationDomain":{ "shape":"NonEmptyString", - "documentation":"

The domain name that AWS Certificate Manager uses to send domain validation emails.

" + "documentation":"

The domain name that Certificate Manager uses to send domain validation emails.

" }, "ValidationEmails":{ "shape":"StringList", - "documentation":"

A list of email addresses that AWS Certificate Manager uses to send domain validation emails.

" + "documentation":"

A list of email addresses that Certificate Manager uses to send domain validation emails.

" }, "ValidationMethod":{ "shape":"NonEmptyString", @@ -1649,7 +1649,7 @@ "documentation":"

The validation status of the domain name.

" } }, - "documentation":"

Contains information about one of the following:

  • The initial validation of each domain name that occurs as a result of the RequestCertificate request

  • The validation of each domain name in the certificate, as it pertains to AWS Certificate Manager managed renewal

" + "documentation":"

Contains information about one of the following:

  • The initial validation of each domain name that occurs as a result of the RequestCertificate request

  • The validation of each domain name in the certificate, as it pertains to Certificate Manager managed renewal

" }, "AwsCertificateManagerCertificateDomainValidationOptions":{ "type":"list", @@ -1702,11 +1702,11 @@ "members":{ "DomainValidationOptions":{ "shape":"AwsCertificateManagerCertificateDomainValidationOptions", - "documentation":"

Information about the validation of each domain name in the certificate, as it pertains to AWS Certificate Manager managed renewal. Provided only when the certificate type is AMAZON_ISSUED.

" + "documentation":"

Information about the validation of each domain name in the certificate, as it pertains to Certificate Manager managed renewal. Provided only when the certificate type is AMAZON_ISSUED.

" }, "RenewalStatus":{ "shape":"NonEmptyString", - "documentation":"

The status of the AWS Certificate Manager managed renewal of the certificate.

Valid values: PENDING_AUTO_RENEWAL | PENDING_VALIDATION | SUCCESS | FAILED

" + "documentation":"

The status of the Certificate Manager managed renewal of the certificate.

Valid values: PENDING_AUTO_RENEWAL | PENDING_VALIDATION | SUCCESS | FAILED

" }, "RenewalStatusReason":{ "shape":"NonEmptyString", @@ -1717,7 +1717,7 @@ "documentation":"

Indicates when the renewal summary was last updated.

Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

" } }, - "documentation":"

Contains information about the AWS Certificate Manager managed renewal for an AMAZON_ISSUED certificate.

" + "documentation":"

Contains information about the Certificate Manager managed renewal for an AMAZON_ISSUED certificate.

" }, "AwsCertificateManagerCertificateResourceRecord":{ "type":"structure", @@ -1816,7 +1816,7 @@ }, "WebAclId":{ "shape":"NonEmptyString", - "documentation":"

A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution.

" + "documentation":"

A unique identifier that specifies the WAF web ACL, if any, to associate with this distribution.

" } }, "documentation":"

A distribution configuration.

" @@ -1826,7 +1826,7 @@ "members":{ "Bucket":{ "shape":"NonEmptyString", - "documentation":"

The Amazon S3 bucket to store the access logs in.

" + "documentation":"

The S3 bucket to store the access logs in.

" }, "Enabled":{ "shape":"Boolean", @@ -1900,7 +1900,7 @@ "members":{ "DomainName":{ "shape":"NonEmptyString", - "documentation":"

Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin.

" + "documentation":"

Amazon S3 origins: The DNS name of the S3 bucket from which you want CloudFront to get objects for this origin.

" }, "Id":{ "shape":"NonEmptyString", @@ -1915,7 +1915,7 @@ "documentation":"

An origin that is an S3 bucket that is not configured with static website hosting.

" } }, - "documentation":"

A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon Elemental MediaStore, or other server from which CloudFront gets your files.

" + "documentation":"

A complex type that describes the S3 bucket, HTTP server (for example, a web server), AWS Elemental MediaStore, or other server from which CloudFront gets your files.

" }, "AwsCloudFrontDistributionOriginItemList":{ "type":"list", @@ -1950,7 +1950,7 @@ }, "CloudWatchLogsRoleArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the role that the CloudWatch Logs endpoint assumes when it writes to the log group.

" + "documentation":"

The ARN of the role that the CloudWatch Events endpoint assumes when it writes to the log group.

" }, "HasCustomEventSelectors":{ "shape":"Boolean", @@ -1970,11 +1970,11 @@ }, "IsOrganizationTrail":{ "shape":"Boolean", - "documentation":"

Whether the trail is created for all accounts in an organization in AWS Organizations, or only for the current AWS account.

" + "documentation":"

Whether the trail is created for all accounts in an organization in Organizations, or only for the current Amazon Web Services account.

" }, "KmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

The AWS KMS key ID to use to encrypt the logs.

" + "documentation":"

The KMS key ID to use to encrypt the logs.

" }, "LogFileValidationEnabled":{ "shape":"Boolean", @@ -2012,7 +2012,7 @@ "members":{ "EncryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) used to encrypt the build output artifacts.

You can specify either the ARN of the CMK or, if available, the CMK alias (using the format alias/alias-name).

" + "documentation":"

The KMS customer master key (CMK) used to encrypt the build output artifacts.

You can specify either the ARN of the CMK or, if available, the CMK alias (using the format alias/alias-name).

" }, "Environment":{ "shape":"AwsCodeBuildProjectEnvironment", @@ -2028,14 +2028,14 @@ }, "ServiceRole":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.

" + "documentation":"

The ARN of the IAM role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

" }, "VpcConfig":{ "shape":"AwsCodeBuildProjectVpcConfig", - "documentation":"

Information about the VPC configuration that AWS CodeBuild accesses.

" + "documentation":"

Information about the VPC configuration that CodeBuild accesses.

" } }, - "documentation":"

Information about an AWS CodeBuild project.

" + "documentation":"

Information about a CodeBuild project.

" }, "AwsCodeBuildProjectEnvironment":{ "type":"structure", @@ -2046,7 +2046,7 @@ }, "ImagePullCredentialsType":{ "shape":"NonEmptyString", - "documentation":"

The type of credentials AWS CodeBuild uses to pull images in your build.

Valid values:

  • CODEBUILD specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust the AWS CodeBuild service principal.

  • SERVICE_ROLE specifies that AWS CodeBuild uses your build project's service role.

When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials.

" + "documentation":"

The type of credentials CodeBuild uses to pull images in your build.

Valid values:

  • CODEBUILD specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust the CodeBuild service principal.

  • SERVICE_ROLE specifies that CodeBuild uses your build project's service role.

When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use a CodeBuild curated image, you must use CODEBUILD credentials.

" }, "RegistryCredential":{ "shape":"AwsCodeBuildProjectEnvironmentRegistryCredential", @@ -2064,11 +2064,11 @@ "members":{ "Credential":{ "shape":"NonEmptyString", - "documentation":"

The ARN or name of credentials created using AWS Secrets Manager.

The credential can use the name of the credentials only if they exist in your current AWS Region.

" + "documentation":"

The ARN or name of credentials created using Secrets Manager.

The credential can use the name of the credentials only if they exist in your current Amazon Web Services Region.

" }, "CredentialProvider":{ "shape":"NonEmptyString", - "documentation":"

The service that created the credentials to access a private Docker registry.

The valid value, SECRETS_MANAGER, is for AWS Secrets Manager.

" + "documentation":"

The service that created the credentials to access a private Docker registry.

The valid value, SECRETS_MANAGER, is for Secrets Manager.

" } }, "documentation":"

The credentials for access to a private registry.

" @@ -2078,11 +2078,11 @@ "members":{ "Type":{ "shape":"NonEmptyString", - "documentation":"

The type of repository that contains the source code to be built. Valid values are:

  • BITBUCKET - The source code is in a Bitbucket repository.

  • CODECOMMIT - The source code is in an AWS CodeCommit repository.

  • CODEPIPELINE - The source code settings are specified in the source action of a pipeline in AWS CodePipeline.

  • GITHUB - The source code is in a GitHub repository.

  • GITHUB_ENTERPRISE - The source code is in a GitHub Enterprise repository.

  • NO_SOURCE - The project does not have input source code.

  • S3 - The source code is in an S3 input bucket.

" + "documentation":"

The type of repository that contains the source code to be built. Valid values are:

  • BITBUCKET - The source code is in a Bitbucket repository.

  • CODECOMMIT - The source code is in a CodeCommit repository.

  • CODEPIPELINE - The source code settings are specified in the source action of a pipeline in CodePipeline.

  • GITHUB - The source code is in a GitHub repository.

  • GITHUB_ENTERPRISE - The source code is in a GitHub Enterprise repository.

  • NO_SOURCE - The project does not have input source code.

  • S3 - The source code is in an S3 input bucket.

" }, "Location":{ "shape":"NonEmptyString", - "documentation":"

Information about the location of the source code to be built.

Valid values include:

  • For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec file (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name ).

  • For source code in an S3 input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip).

    • The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec file.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec file.

" + "documentation":"

Information about the location of the source code to be built.

Valid values include:

  • For source code settings that are specified in the source action of a pipeline in CodePipeline, location should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in a CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec file (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name ).

  • For source code in an S3 input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip).

    • The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec file.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec file.

" }, "GitCloneDepth":{ "shape":"Integer", @@ -2104,14 +2104,14 @@ }, "Subnets":{ "shape":"NonEmptyStringList", - "documentation":"

A list of one or more subnet IDs in your Amazon VPC.

" + "documentation":"

A list of one or more subnet IDs in your VPC.

" }, "SecurityGroupIds":{ "shape":"NonEmptyStringList", - "documentation":"

A list of one or more security group IDs in your Amazon VPC.

" + "documentation":"

A list of one or more security group IDs in your VPC.

" } }, - "documentation":"

Information about the VPC configuration that AWS CodeBuild accesses.

" + "documentation":"

Information about the VPC configuration that CodeBuild accesses.

" }, "AwsCorsConfiguration":{ "type":"structure", @@ -2406,7 +2406,7 @@ }, "KmsMasterKeyId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the AWS KMS customer master key (CMK) that will be used for AWS KMS encryption for the replica.

" + "documentation":"

The identifier of the KMS customer master key (CMK) that will be used for KMS encryption for the replica.

" }, "ProvisionedThroughputOverride":{ "shape":"AwsDynamoDbTableProvisionedThroughputOverride", @@ -2488,7 +2488,7 @@ }, "KmsMasterKeyArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the AWS KMS customer master key (CMK) that is used for the AWS KMS encryption.

" + "documentation":"

The ARN of the KMS customer master key (CMK) that is used for the KMS encryption.

" } }, "documentation":"

Information about the server-side encryption for the table.

" @@ -2520,7 +2520,7 @@ }, "AllocationId":{ "shape":"NonEmptyString", - "documentation":"

The identifier that AWS assigns to represent the allocation of the Elastic IP address for use with Amazon VPC.

" + "documentation":"

The identifier that Amazon Web Services assigns to represent the allocation of the Elastic IP address for use with Amazon VPC.

" }, "AssociationId":{ "shape":"NonEmptyString", @@ -2544,7 +2544,7 @@ }, "NetworkInterfaceOwnerId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account ID of the owner of the network interface.

" + "documentation":"

The Amazon Web Services account ID of the owner of the network interface.

" }, "PrivateIpAddress":{ "shape":"NonEmptyString", @@ -2648,7 +2648,7 @@ }, "OwnerId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the AWS account that owns the network ACL.

" + "documentation":"

The identifier of the Amazon Web Services account that owns the network ACL.

" }, "VpcId":{ "shape":"NonEmptyString", @@ -2732,7 +2732,7 @@ }, "InstanceOwnerId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account ID of the owner of the instance.

" + "documentation":"

The Amazon Web Services account ID of the owner of the instance.

" }, "Status":{ "shape":"NonEmptyString", @@ -2842,7 +2842,7 @@ }, "OwnerId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account ID of the owner of the security group.

" + "documentation":"

The Amazon Web Services account ID of the owner of the security group.

" }, "VpcId":{ "shape":"NonEmptyString", @@ -2876,7 +2876,7 @@ }, "UserIdGroupPairs":{ "shape":"AwsEc2SecurityGroupUserIdGroupPairList", - "documentation":"

The security group and AWS account ID pairs.

" + "documentation":"

The security group and Amazon Web Services account ID pairs.

" }, "IpRanges":{ "shape":"AwsEc2SecurityGroupIpRangeList", @@ -2888,7 +2888,7 @@ }, "PrefixListIds":{ "shape":"AwsEc2SecurityGroupPrefixListIdList", - "documentation":"

[VPC only] The prefix list IDs for an AWS service. With outbound rules, this is the AWS service to access through a VPC endpoint from instances associated with the security group.

" + "documentation":"

[VPC only] The prefix list IDs for an Amazon Web Services service. With outbound rules, this is the Amazon Web Services service to access through a VPC endpoint from instances associated with the security group.

" } }, "documentation":"

An IP permission for an EC2 security group.

" @@ -2956,7 +2956,7 @@ }, "UserId":{ "shape":"NonEmptyString", - "documentation":"

The ID of an AWS account.

For a referenced security group in another VPC, the account ID of the referenced security group is returned in the response. If the referenced security group is deleted, this value is not returned.

[EC2-Classic] Required when adding or removing rules that reference a security group in another VPC.

" + "documentation":"

The ID of an Amazon Web Services account.

For a referenced security group in another VPC, the account ID of the referenced security group is returned in the response. If the referenced security group is deleted, this value is not returned.

[EC2-Classic] Required when adding or removing rules that reference a security group in another VPC.

" }, "VpcId":{ "shape":"NonEmptyString", @@ -3006,7 +3006,7 @@ }, "OwnerId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the AWS account that owns the subnet.

" + "documentation":"

The identifier of the Amazon Web Services account that owns the subnet.

" }, "State":{ "shape":"NonEmptyString", @@ -3029,7 +3029,7 @@ "documentation":"

The IPV6 CIDR blocks that are associated with the subnet.

" } }, - "documentation":"

Contains information about a subnet in EC2.

" + "documentation":"

Contains information about a subnet in Amazon EC2.

" }, "AwsEc2VolumeAttachment":{ "type":"structure", @@ -3051,7 +3051,7 @@ "documentation":"

The attachment state of the volume.

" } }, - "documentation":"

An attachment to an AWS EC2 volume.

" + "documentation":"

An attachment to an Amazon EC2 volume.

" }, "AwsEc2VolumeAttachmentList":{ "type":"list", @@ -3082,7 +3082,7 @@ }, "KmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.

" + "documentation":"

The ARN of the KMS customer master key (CMK) that was used to protect the volume encryption key for the volume.

" }, "Attachments":{ "shape":"AwsEc2VolumeAttachmentList", @@ -3229,6 +3229,280 @@ }, "documentation":"

Provides details about an ECS cluster.

" }, + "AwsEcsServiceCapacityProviderStrategyDetails":{ + "type":"structure", + "members":{ + "Base":{ + "shape":"Integer", + "documentation":"

The minimum number of tasks to run on the capacity provider. Only one strategy item can specify a value for Base.

The value must be between 0 and 100000.

" + }, + "CapacityProvider":{ + "shape":"NonEmptyString", + "documentation":"

The short name of the capacity provider.

" + }, + "Weight":{ + "shape":"Integer", + "documentation":"

The relative percentage of the total number of tasks that should use the capacity provider.

If no weight is specified, the default value is 0. At least one capacity provider must have a weight greater than 0.

The value can be between 0 and 1000.

" + } + }, + "documentation":"

Strategy item for the capacity provider strategy that the service uses.

" + }, + "AwsEcsServiceCapacityProviderStrategyList":{ + "type":"list", + "member":{"shape":"AwsEcsServiceCapacityProviderStrategyDetails"} + }, + "AwsEcsServiceDeploymentConfigurationDeploymentCircuitBreakerDetails":{ + "type":"structure", + "members":{ + "Enable":{ + "shape":"Boolean", + "documentation":"

Whether to enable the deployment circuit breaker logic for the service.

" + }, + "Rollback":{ + "shape":"Boolean", + "documentation":"

Whether to roll back the service if a service deployment fails. If rollback is enabled, when a service deployment fails, the service is rolled back to the last deployment that completed successfully.

" + } + }, + "documentation":"

Determines whether a service deployment fails if a service cannot reach a steady state.

" + }, + "AwsEcsServiceDeploymentConfigurationDetails":{ + "type":"structure", + "members":{ + "DeploymentCircuitBreaker":{ + "shape":"AwsEcsServiceDeploymentConfigurationDeploymentCircuitBreakerDetails", + "documentation":"

Determines whether a service deployment fails if a service cannot reach a steady state.

" + }, + "MaximumPercent":{ + "shape":"Integer", + "documentation":"

For a service that uses the rolling update (ECS) deployment type, the maximum number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, and for tasks that use the EC2 launch type, when any container instances are in the DRAINING state. Provided as a percentage of the desired number of tasks. The default value is 200%.

For a service that uses the blue/green (CODE_DEPLOY) or EXTERNAL deployment types, and tasks that use the EC2 launch type, the maximum number of tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state.

For the Fargate launch type, the maximum percent value is not used.

" + }, + "MinimumHealthyPercent":{ + "shape":"Integer", + "documentation":"

For a service that uses the rolling update (ECS) deployment type, the minimum number of tasks in a service that must remain in the RUNNING state during a deployment, and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Expressed as a percentage of the desired number of tasks. The default value is 100%.

For a service that uses the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and tasks that use the EC2 launch type, the minimum number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state.

For the Fargate launch type, the minimum healthy percent value is not used.

" + } + }, + "documentation":"

Optional deployment parameters for the service.

" + }, + "AwsEcsServiceDeploymentControllerDetails":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"NonEmptyString", + "documentation":"

The rolling update (ECS) deployment type replaces the current running version of the container with the latest version.

The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment model that is powered by CodeDeploy. With this deployment model, a new deployment of a service can be verified before production traffic is sent to it.

The external (EXTERNAL) deployment type allows the use of any third-party deployment controller for full control over the deployment process for an Amazon ECS service.

Valid values: ECS | CODE_DEPLOY | EXTERNAL

" + } + }, + "documentation":"

Information about the deployment controller type that the service uses.

" + }, + "AwsEcsServiceDetails":{ + "type":"structure", + "members":{ + "CapacityProviderStrategy":{ + "shape":"AwsEcsServiceCapacityProviderStrategyList", + "documentation":"

The capacity provider strategy that the service uses.

" + }, + "Cluster":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the cluster that hosts the service.

" + }, + "DeploymentConfiguration":{ + "shape":"AwsEcsServiceDeploymentConfigurationDetails", + "documentation":"

Deployment parameters for the service. Includes the number of tasks that run and the order in which to start and stop tasks.

" + }, + "DeploymentController":{ + "shape":"AwsEcsServiceDeploymentControllerDetails", + "documentation":"

Contains the deployment controller type that the service uses.

" + }, + "DesiredCount":{ + "shape":"Integer", + "documentation":"

The number of instantiations of the task definition to run on the service.

" + }, + "EnableEcsManagedTags":{ + "shape":"Boolean", + "documentation":"

Whether to enable Amazon ECS managed tags for the tasks in the service.

" + }, + "EnableExecuteCommand":{ + "shape":"Boolean", + "documentation":"

Whether the execute command functionality is enabled for the service.

" + }, + "HealthCheckGracePeriodSeconds":{ + "shape":"Integer", + "documentation":"

After a task starts, the amount of time in seconds that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks.

" + }, + "LaunchType":{ + "shape":"NonEmptyString", + "documentation":"

The launch type that the service uses.

Valid values: EC2 | FARGATE | EXTERNAL

" + }, + "LoadBalancers":{ + "shape":"AwsEcsServiceLoadBalancersList", + "documentation":"

Information about the load balancers that the service uses.

" + }, + "Name":{ + "shape":"NonEmptyString", + "documentation":"

The name of the service.

" + }, + "NetworkConfiguration":{ + "shape":"AwsEcsServiceNetworkConfigurationDetails", + "documentation":"

For tasks that use the awsvpc networking mode, the VPC subnet and security group configuration.

" + }, + "PlacementConstraints":{ + "shape":"AwsEcsServicePlacementConstraintsList", + "documentation":"

The placement constraints for the tasks in the service.

" + }, + "PlacementStrategies":{ + "shape":"AwsEcsServicePlacementStrategiesList", + "documentation":"

Information about how tasks for the service are placed.

" + }, + "PlatformVersion":{ + "shape":"NonEmptyString", + "documentation":"

The platform version on which to run the service. Only specified for tasks that are hosted on Fargate. If a platform version is not specified, the LATEST platform version is used by default.

" + }, + "PropagateTags":{ + "shape":"NonEmptyString", + "documentation":"

Indicates whether to propagate the tags from the task definition to the task or from the service to the task. If no value is provided, then tags are not propagated.

Valid values: TASK_DEFINITION | SERVICE

" + }, + "Role":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the IAM role that is associated with the service. The role allows the Amazon ECS container agent to register container instances with an Elastic Load Balancing load balancer.

" + }, + "SchedulingStrategy":{ + "shape":"NonEmptyString", + "documentation":"

The scheduling strategy to use for the service.

The REPLICA scheduling strategy places and maintains the desired number of tasks across the cluster. By default, the service scheduler spreads tasks across Availability Zones. Task placement strategies and constraints are used to customize task placement decisions.

The DAEMON scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that are specified in the cluster. The service scheduler also evaluates the task placement constraints for running tasks and stops tasks that do not meet the placement constraints.

Valid values: REPLICA | DAEMON

" + }, + "ServiceArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the service.

" + }, + "ServiceName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the service.

The name can contain up to 255 characters. It can use letters, numbers, underscores, and hyphens.

" + }, + "ServiceRegistries":{ + "shape":"AwsEcsServiceServiceRegistriesList", + "documentation":"

Information about the service discovery registries to assign to the service.

" + }, + "TaskDefinition":{ + "shape":"NonEmptyString", + "documentation":"

The task definition to use for tasks in the service.

" + } + }, + "documentation":"

Provides details about a service within an ECS cluster.

" + }, + "AwsEcsServiceLoadBalancersDetails":{ + "type":"structure", + "members":{ + "ContainerName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the container to associate with the load balancer.

" + }, + "ContainerPort":{ + "shape":"Integer", + "documentation":"

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they are launched on must allow ingress traffic on the hostPort of the port mapping.

" + }, + "LoadBalancerName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the load balancer to associate with the Amazon ECS service or task set.

Only specified when using a Classic Load Balancer. For an Application Load Balancer or a Network Load Balancer, the load balancer name is omitted.

" + }, + "TargetGroupArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the Elastic Load Balancing target group or groups associated with a service or task set.

Only specified when using an Application Load Balancer or a Network Load Balancer. For a Classic Load Balancer, the target group ARN is omitted.

" + } + }, + "documentation":"

Information about a load balancer that the service uses.

" + }, + "AwsEcsServiceLoadBalancersList":{ + "type":"list", + "member":{"shape":"AwsEcsServiceLoadBalancersDetails"} + }, + "AwsEcsServiceNetworkConfigurationAwsVpcConfigurationDetails":{ + "type":"structure", + "members":{ + "AssignPublicIp":{ + "shape":"NonEmptyString", + "documentation":"

Whether the task's elastic network interface receives a public IP address. The default value is DISABLED.

Valid values: ENABLED | DISABLED

" + }, + "SecurityGroups":{ + "shape":"NonEmptyStringList", + "documentation":"

The IDs of the security groups associated with the task or service.

You can provide up to five security groups.

" + }, + "Subnets":{ + "shape":"NonEmptyStringList", + "documentation":"

The IDs of the subnets associated with the task or service.

You can provide up to 16 subnets.

" + } + }, + "documentation":"

For tasks that use the awsvpc networking mode, the VPC subnet and security group configuration.

" + }, + "AwsEcsServiceNetworkConfigurationDetails":{ + "type":"structure", + "members":{ + "AwsVpcConfiguration":{ + "shape":"AwsEcsServiceNetworkConfigurationAwsVpcConfigurationDetails", + "documentation":"

The VPC subnet and security group configuration.

" + } + }, + "documentation":"

For tasks that use the awsvpc networking mode, the VPC subnet and security group configuration.

" + }, + "AwsEcsServicePlacementConstraintsDetails":{ + "type":"structure", + "members":{ + "Expression":{ + "shape":"NonEmptyString", + "documentation":"

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance.

" + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

The type of constraint. Use distinctInstance to run each task in a particular group on a different container instance. Use memberOf to restrict the selection to a group of valid candidates.

Valid values: distinctInstance | memberOf

" + } + }, + "documentation":"

A placement constraint for the tasks in the service.

" + }, + "AwsEcsServicePlacementConstraintsList":{ + "type":"list", + "member":{"shape":"AwsEcsServicePlacementConstraintsDetails"} + }, + "AwsEcsServicePlacementStrategiesDetails":{ + "type":"structure", + "members":{ + "Field":{ + "shape":"NonEmptyString", + "documentation":"

The field to apply the placement strategy against.

For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone.

For the binpack placement strategy, valid values are cpu and memory.

For the random placement strategy, this attribute is not used.

" + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

The type of placement strategy.

The random placement strategy randomly places tasks on available candidates.

The spread placement strategy spreads placement across available candidates evenly based on the value of Field.

The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified in Field.

Valid values: random | spread | binpack

" + } + }, + "documentation":"

A placement strategy that determines how to place the tasks for the service.

" + }, + "AwsEcsServicePlacementStrategiesList":{ + "type":"list", + "member":{"shape":"AwsEcsServicePlacementStrategiesDetails"} + }, + "AwsEcsServiceServiceRegistriesDetails":{ + "type":"structure", + "members":{ + "ContainerName":{ + "shape":"NonEmptyString", + "documentation":"

The container name value to use for the service discovery service.

If the task definition uses the bridge or host network mode, you must specify ContainerName and ContainerPort.

If the task definition uses the awsvpc network mode and a type SRV DNS record, you must specify either ContainerName and ContainerPort, or Port, but not both.

" + }, + "ContainerPort":{ + "shape":"Integer", + "documentation":"

The port value to use for the service discovery service.

If the task definition uses the bridge or host network mode, you must specify ContainerName and ContainerPort.

If the task definition uses the awsvpc network mode and a type SRV DNS record, you must specify either ContainerName and ContainerPort, or Port, but not both.

" + }, + "Port":{ + "shape":"Integer", + "documentation":"

The port value to use for a service discovery service that specifies an SRV record. This field can be used if both the awsvpc network mode and SRV records are used.

" + }, + "RegistryArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the service registry.

" + } + }, + "documentation":"

Information about a service discovery registry to assign to the service.

" + }, + "AwsEcsServiceServiceRegistriesList":{ + "type":"list", + "member":{"shape":"AwsEcsServiceServiceRegistriesDetails"} + }, "AwsEcsTaskDefinitionContainerDefinitionsDependsOnDetails":{ "type":"structure", "members":{ @@ -3836,7 +4110,7 @@ }, "TaskRoleArn":{ "shape":"NonEmptyString", - "documentation":"

The short name or ARN of the IAM role that grants containers in the task permission to call AWS API operations on your behalf.

" + "documentation":"

The short name or ARN of the IAM role that grants containers in the task permission to call Amazon Web Services API operations on your behalf.

" }, "Volumes":{ "shape":"AwsEcsTaskDefinitionVolumesList", @@ -4156,7 +4430,7 @@ "members":{ "AccessPolicies":{ "shape":"NonEmptyString", - "documentation":"

IAM policy document specifying the access policies for the new Amazon ES domain.

" + "documentation":"

IAM policy document specifying the access policies for the new Elasticsearch domain.

" }, "DomainEndpointOptions":{ "shape":"AwsElasticsearchDomainDomainEndpointOptions", @@ -4164,24 +4438,28 @@ }, "DomainId":{ "shape":"NonEmptyString", - "documentation":"

Unique identifier for an Amazon ES domain.

" + "documentation":"

Unique identifier for an Elasticsearch domain.

" }, "DomainName":{ "shape":"NonEmptyString", - "documentation":"

Name of an Amazon ES domain.

Domain names are unique across all domains owned by the same account within an AWS Region.

Domain names must start with a lowercase letter and must be between 3 and 28 characters.

Valid characters are a-z (lowercase only), 0-9, and – (hyphen).

" + "documentation":"

Name of an Elasticsearch domain.

Domain names are unique across all domains owned by the same account within an Amazon Web Services Region.

Domain names must start with a lowercase letter and must be between 3 and 28 characters.

Valid characters are a-z (lowercase only), 0-9, and – (hyphen).

" }, "Endpoint":{ "shape":"NonEmptyString", - "documentation":"

Domain-specific endpoint used to submit index, search, and data upload requests to an Amazon ES domain.

The endpoint is a service URL.

" + "documentation":"

Domain-specific endpoint used to submit index, search, and data upload requests to an Elasticsearch domain.

The endpoint is a service URL.

" }, "Endpoints":{ "shape":"FieldMap", - "documentation":"

The key-value pair that exists if the Amazon ES domain uses VPC endpoints.

" + "documentation":"

The key-value pair that exists if the Elasticsearch domain uses VPC endpoints.

" }, "ElasticsearchVersion":{ "shape":"NonEmptyString", "documentation":"

Elasticsearch version.

" }, + "ElasticsearchClusterConfig":{ + "shape":"AwsElasticsearchDomainElasticsearchClusterConfigDetails", + "documentation":"

Information about an Elasticsearch cluster configuration.

" + }, "EncryptionAtRestOptions":{ "shape":"AwsElasticsearchDomainEncryptionAtRestOptions", "documentation":"

Details about the configuration for encryption at rest.

" @@ -4200,10 +4478,10 @@ }, "VPCOptions":{ "shape":"AwsElasticsearchDomainVPCOptions", - "documentation":"

Information that Amazon ES derives based on VPCOptions for the domain.

" + "documentation":"

Information that Elasticsearch derives based on VPCOptions for the domain.

" } }, - "documentation":"

Information about an Elasticsearch domain.

" + "documentation":"

Information about an Amazon Elasticsearch Service domain.

" }, "AwsElasticsearchDomainDomainEndpointOptions":{ "type":"structure", @@ -4219,6 +4497,50 @@ }, "documentation":"

Additional options for the domain endpoint, such as whether to require HTTPS for all traffic.

" }, + "AwsElasticsearchDomainElasticsearchClusterConfigDetails":{ + "type":"structure", + "members":{ + "DedicatedMasterCount":{ + "shape":"Integer", + "documentation":"

The number of instances to use for the master node. If this attribute is specified, then DedicatedMasterEnabled must be true.

" + }, + "DedicatedMasterEnabled":{ + "shape":"Boolean", + "documentation":"

Whether to use a dedicated master node for the Elasticsearch domain. A dedicated master node performs cluster management tasks, but doesn't hold data or respond to data upload requests.

" + }, + "DedicatedMasterType":{ + "shape":"NonEmptyString", + "documentation":"

The hardware configuration of the computer that hosts the dedicated master node. For example, m3.medium.elasticsearch. If this attribute is specified, then DedicatedMasterEnabled must be true.

" + }, + "InstanceCount":{ + "shape":"Integer", + "documentation":"

The number of data nodes to use in the Elasticsearch domain.

" + }, + "InstanceType":{ + "shape":"NonEmptyString", + "documentation":"

The instance type for your data nodes. For example, m3.medium.elasticsearch.

" + }, + "ZoneAwarenessConfig":{ + "shape":"AwsElasticsearchDomainElasticsearchClusterConfigZoneAwarenessConfigDetails", + "documentation":"

Configuration options for zone awareness. Provided if ZoneAwarenessEnabled is true.

" + }, + "ZoneAwarenessEnabled":{ + "shape":"Boolean", + "documentation":"

Whether to enable zone awareness for the Elasticsearch domain. When zone awareness is enabled, Elasticsearch allocates the cluster's nodes and replica index shards across Availability Zones in the same Region. This prevents data loss and minimizes downtime if a node or data center fails.

" + } + }, + "documentation":"

Details about the configuration of an Elasticsearch cluster.

" + }, + "AwsElasticsearchDomainElasticsearchClusterConfigZoneAwarenessConfigDetails":{ + "type":"structure", + "members":{ + "AvailabilityZoneCount":{ + "shape":"Integer", + "documentation":"

The number of Availability Zones that the domain uses. Valid values are 2 and 3. The default is 2.

" + } + }, + "documentation":"

Configuration options for zone awareness.

" + }, "AwsElasticsearchDomainEncryptionAtRestOptions":{ "type":"structure", "members":{ @@ -4243,7 +4565,8 @@ "SearchSlowLogs":{ "shape":"AwsElasticsearchDomainLogPublishingOptionsLogConfig", "documentation":"

Configures the Elasticsearch search slow log publishing.

" - } + }, + "AuditLogs":{"shape":"AwsElasticsearchDomainLogPublishingOptionsLogConfig"} }, "documentation":"

Configures the CloudWatch Logs to publish for the Elasticsearch domain.

" }, @@ -4325,7 +4648,7 @@ "documentation":"

ID for the VPC.

" } }, - "documentation":"

Information that Amazon ES derives based on VPCOptions for the domain.

" + "documentation":"

Information that Elasticsearch derives based on VPCOptions for the domain.

" }, "AwsElbAppCookieStickinessPolicies":{ "type":"list", @@ -4726,7 +5049,7 @@ }, "AccountId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account ID of the account for the key.

" + "documentation":"

The Amazon Web Services account ID of the account for the key.

" }, "AccessKeyId":{ "shape":"NonEmptyString", @@ -4784,7 +5107,7 @@ }, "AccountId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the AWS account that created the session.

" + "documentation":"

The identifier of the Amazon Web Services account that created the session.

" }, "UserName":{ "shape":"NonEmptyString", @@ -5136,7 +5459,7 @@ "members":{ "AWSAccountId":{ "shape":"NonEmptyString", - "documentation":"

The twelve-digit account ID of the AWS account that owns the CMK.

" + "documentation":"

The twelve-digit account ID of the Amazon Web Services account that owns the CMK.

" }, "CreationDate":{ "shape":"Double", @@ -5148,7 +5471,7 @@ }, "KeyManager":{ "shape":"NonEmptyString", - "documentation":"

The manager of the CMK. CMKs in your AWS account are either customer managed or AWS managed.

" + "documentation":"

The manager of the CMK. CMKs in your Amazon Web Services account are either customer managed or Amazon Web Services managed.

" }, "KeyState":{ "shape":"NonEmptyString", @@ -5156,7 +5479,7 @@ }, "Origin":{ "shape":"NonEmptyString", - "documentation":"

The source of the CMK's key material.

When this value is AWS_KMS, AWS KMS created the key material.

When this value is EXTERNAL, the key material was imported from your existing key management infrastructure or the CMK lacks key material.

When this value is AWS_CLOUDHSM, the key material was created in the AWS CloudHSM cluster associated with a custom key store.

" + "documentation":"

The source of the CMK's key material.

When this value is AWS_KMS, KMS created the key material.

When this value is EXTERNAL, the key material was imported from your existing key management infrastructure or the CMK lacks key material.

When this value is AWS_CLOUDHSM, the key material was created in the CloudHSM cluster associated with a custom key store.

" }, "Description":{ "shape":"NonEmptyString", @@ -5170,7 +5493,7 @@ "members":{ "S3Bucket":{ "shape":"NonEmptyString", - "documentation":"

An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.

" + "documentation":"

An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account.

" }, "S3Key":{ "shape":"NonEmptyString", @@ -5182,7 +5505,7 @@ }, "ZipFile":{ "shape":"NonEmptyString", - "documentation":"

The base64-encoded contents of the deployment package. AWS SDK and AWS CLI clients handle the encoding for you.

" + "documentation":"

The base64-encoded contents of the deployment package. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.

" } }, "documentation":"

The code for the Lambda function. You can specify either an object in Amazon S3, or upload a deployment package directly.

" @@ -5192,7 +5515,7 @@ "members":{ "TargetArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of an Amazon SQS queue or Amazon SNS topic.

" + "documentation":"

The ARN of an SQS queue or SNS topic.

" } }, "documentation":"

The dead-letter queue for failed asynchronous invocations.

" @@ -5226,7 +5549,7 @@ }, "KmsKeyArn":{ "shape":"NonEmptyString", - "documentation":"

The KMS key that's used to encrypt the function's environment variables. This key is only returned if you've configured a customer managed CMK.

" + "documentation":"

The KMS key that is used to encrypt the function's environment variables. This key is only returned if you've configured a customer managed CMK.

" }, "LastModified":{ "shape":"NonEmptyString", @@ -5262,7 +5585,7 @@ }, "TracingConfig":{ "shape":"AwsLambdaFunctionTracingConfig", - "documentation":"

The function's AWS X-Ray tracing configuration.

" + "documentation":"

The function's X-Ray tracing configuration.

" }, "VpcConfig":{ "shape":"AwsLambdaFunctionVpcConfig", @@ -5315,7 +5638,7 @@ "documentation":"

The size of the layer archive in bytes.

" } }, - "documentation":"

An AWS Lambda layer.

" + "documentation":"

A Lambda layer.

" }, "AwsLambdaFunctionLayerList":{ "type":"list", @@ -5329,7 +5652,7 @@ "documentation":"

The tracing mode.

" } }, - "documentation":"

The function's AWS X-Ray tracing configuration.

" + "documentation":"

The function's X-Ray tracing configuration.

" }, "AwsLambdaFunctionVpcConfig":{ "type":"structure", @@ -5467,11 +5790,11 @@ }, "KmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the AWS KMS master key that is used to encrypt the database instances in the DB cluster.

" + "documentation":"

The ARN of the KMS master key that is used to encrypt the database instances in the DB cluster.

" }, "DbClusterResourceId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the DB cluster. The identifier must be unique within each AWS Region and is immutable.

" + "documentation":"

The identifier of the DB cluster. The identifier must be unique within each Amazon Web Services Region and is immutable.

" }, "AssociatedRoles":{ "shape":"AwsRdsDbClusterAssociatedRoles", @@ -5507,7 +5830,7 @@ }, "CrossAccountClone":{ "shape":"Boolean", - "documentation":"

Whether the DB cluster is a clone of a DB cluster owned by a different AWS account.

" + "documentation":"

Whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account.

" }, "DomainMemberships":{ "shape":"AwsRdsDbDomainMemberships", @@ -5645,7 +5968,7 @@ }, "KmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the AWS KMS master key that is used to encrypt the database instances in the DB cluster.

" + "documentation":"

The ARN of the KMS master key that is used to encrypt the database instances in the DB cluster.

" }, "DbClusterIdentifier":{ "shape":"NonEmptyString", @@ -5697,14 +6020,14 @@ }, "FeatureName":{ "shape":"NonEmptyString", - "documentation":"

The name of the feature associated with the IAM)role.

" + "documentation":"

The name of the feature associated with the IAM role.

" }, "Status":{ "shape":"NonEmptyString", - "documentation":"

Describes the state of the association between the IAM role and the DB instance. The Status property returns one of the following values:

  • ACTIVE - The IAM role ARN is associated with the DB instance and can be used to access other AWS services on your behalf.

  • PENDING - The IAM role ARN is being associated with the DB instance.

  • INVALID - The IAM role ARN is associated with the DB instance. But the DB instance is unable to assume the IAM role in order to access other AWS services on your behalf.

" + "documentation":"

Describes the state of the association between the IAM role and the DB instance. The Status property returns one of the following values:

  • ACTIVE - The IAM role ARN is associated with the DB instance and can be used to access other Amazon Web Services services on your behalf.

  • PENDING - The IAM role ARN is being associated with the DB instance.

  • INVALID - The IAM role ARN is associated with the DB instance. But the DB instance is unable to assume the IAM role in order to access other Amazon Web Services services on your behalf.

" } }, - "documentation":"

An AWS Identity and Access Management (IAM) role associated with the DB instance.

" + "documentation":"

An IAM role associated with the DB instance.

" }, "AwsRdsDbInstanceAssociatedRoles":{ "type":"list", @@ -5715,7 +6038,7 @@ "members":{ "AssociatedRoles":{ "shape":"AwsRdsDbInstanceAssociatedRoles", - "documentation":"

The AWS Identity and Access Management (IAM) roles associated with the DB instance.

" + "documentation":"

The IAM roles associated with the DB instance.

" }, "CACertificateIdentifier":{ "shape":"NonEmptyString", @@ -5739,7 +6062,7 @@ }, "DbiResourceId":{ "shape":"NonEmptyString", - "documentation":"

The AWS Region-unique, immutable identifier for the DB instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB instance is accessed.

" + "documentation":"

The Amazon Web Services Region-unique, immutable identifier for the DB instance. This identifier is found in CloudTrail log entries whenever the KMS key for the DB instance is accessed.

" }, "DBName":{ "shape":"NonEmptyString", @@ -5763,7 +6086,7 @@ }, "IAMDatabaseAuthenticationEnabled":{ "shape":"Boolean", - "documentation":"

True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

IAM database authentication can be enabled for the following database engines.

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

  • Aurora 5.6 or higher

" + "documentation":"

True if mapping of IAM accounts to database accounts is enabled, and otherwise false.

IAM database authentication can be enabled for the following database engines.

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

  • Aurora 5.6 or higher

" }, "InstanceCreateTime":{ "shape":"NonEmptyString", @@ -5771,7 +6094,7 @@ }, "KmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB instance.

" + "documentation":"

If StorageEncrypted is true, the KMS key identifier for the encrypted DB instance.

" }, "PubliclyAccessible":{ "shape":"Boolean", @@ -5919,7 +6242,7 @@ }, "PerformanceInsightsKmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the AWS KMS key used to encrypt the Performance Insights data.

" + "documentation":"

The identifier of the KMS key used to encrypt the Performance Insights data.

" }, "PerformanceInsightsRetentionPeriod":{ "shape":"Integer", @@ -6170,7 +6493,7 @@ }, "SourceRegion":{ "shape":"NonEmptyString", - "documentation":"

The AWS Region that the DB snapshot was created in or copied from.

" + "documentation":"

The Amazon Web Services Region that the DB snapshot was created in or copied from.

" }, "SourceDbSnapshotIdentifier":{ "shape":"NonEmptyString", @@ -6190,7 +6513,7 @@ }, "KmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

If Encrypted is true, the AWS KMS key identifier for the encrypted DB snapshot.

" + "documentation":"

If Encrypted is true, the KMS key identifier for the encrypted DB snapshot.

" }, "Timezone":{ "shape":"NonEmptyString", @@ -6299,6 +6622,52 @@ "type":"list", "member":{"shape":"AwsRdsDbSubnetGroupSubnet"} }, + "AwsRdsEventSubscriptionDetails":{ + "type":"structure", + "members":{ + "CustSubscriptionId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the event notification subscription.

" + }, + "CustomerAwsId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the account that is associated with the event notification subscription.

" + }, + "Enabled":{ + "shape":"Boolean", + "documentation":"

Whether the event notification subscription is enabled.

" + }, + "EventCategoriesList":{ + "shape":"NonEmptyStringList", + "documentation":"

The list of event categories for the event notification subscription.

" + }, + "EventSubscriptionArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the event notification subscription.

" + }, + "SnsTopicArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the SNS topic to post the event notifications to.

" + }, + "SourceIdsList":{ + "shape":"NonEmptyStringList", + "documentation":"

A list of source identifiers for the event notification subscription.

" + }, + "SourceType":{ + "shape":"NonEmptyString", + "documentation":"

The source type for the event notification subscription.

" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"

The status of the event notification subscription.

Valid values: creating | modifying | deleting | active | no-permission | topic-not-exist

" + }, + "SubscriptionCreationTime":{ + "shape":"NonEmptyString", + "documentation":"

The datetime when the event notification subscription was created.

Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

" + } + }, + "documentation":"

Details about an Amazon RDS event notification subscription. The subscription allows Amazon RDS to post events to an SNS topic.

" + }, "AwsRdsPendingCloudWatchLogsExports":{ "type":"structure", "members":{ @@ -6546,11 +6915,11 @@ }, "IamRoles":{ "shape":"AwsRedshiftClusterIamRoles", - "documentation":"

A list of IAM roles that the cluster can use to access other AWS services.

" + "documentation":"

A list of IAM roles that the cluster can use to access other Amazon Web Services services.

" }, "KmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the AWS KMS encryption key that is used to encrypt data in the cluster.

" + "documentation":"

The identifier of the KMS encryption key that is used to encrypt data in the cluster.

" }, "MaintenanceTrackName":{ "shape":"NonEmptyString", @@ -6677,7 +7046,7 @@ "documentation":"

The ARN of the IAM role.

" } }, - "documentation":"

An IAM role that the cluster can use to access other AWS services.

" + "documentation":"

An IAM role that the cluster can use to access other Amazon Web Services services.

" }, "AwsRedshiftClusterIamRoles":{ "type":"list", @@ -6812,7 +7181,7 @@ }, "RestrictPublicBuckets":{ "shape":"Boolean", - "documentation":"

Indicates whether to restrict access to an access point or S3 bucket that has a public policy to only AWS service principals and authorized users within the S3 bucket owner's account.

" + "documentation":"

Indicates whether to restrict access to an access point or S3 bucket that has a public policy to only Amazon Web Services service principals and authorized users within the S3 bucket owner's account.

" } }, "documentation":"

Provides information about the Amazon S3 Public Access Block configuration for accounts.

" @@ -7052,7 +7421,7 @@ }, "KMSMasterKeyID":{ "shape":"NonEmptyString", - "documentation":"

AWS KMS customer master key (CMK) ID to use for the default encryption.

" + "documentation":"

KMS customer master key (CMK) ID to use for the default encryption.

" } }, "documentation":"

Specifies the default server-side encryption to apply to new objects in the bucket.

" @@ -7106,7 +7475,7 @@ }, "SSEKMSKeyId":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

" + "documentation":"

The identifier of the KMS symmetric customer managed customer master key (CMK) that was used for the object.

" } }, "documentation":"

Details about an Amazon S3 object.

" @@ -7124,7 +7493,7 @@ }, "KmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ARN, Key ID, or alias of the AWS KMS customer master key (CMK) used to encrypt the SecretString or SecretBinary values for versions of this secret.

" + "documentation":"

The ARN, Key ID, or alias of the KMS customer master key (CMK) used to encrypt the SecretString or SecretBinary values for versions of this secret.

" }, "RotationEnabled":{ "shape":"Boolean", @@ -7147,7 +7516,7 @@ "documentation":"

The user-provided description of the secret.

" } }, - "documentation":"

Details about an AWS Secrets Manager secret.

" + "documentation":"

Details about a Secrets Manager secret.

" }, "AwsSecretsManagerSecretRotationRules":{ "type":"structure", @@ -7186,13 +7555,25 @@ "shape":"NonEmptyString", "documentation":"

The ARN generated by Security Hub that uniquely identifies a product that generates findings. This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for a custom integration.

" }, + "ProductName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the product that generated the finding.

Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings. The exception to this is when you use a custom integration.

When you use the Security Hub console to filter findings by product name, you use this attribute.

When you use the Security Hub API to filter findings by product name, you use the aws/securityhub/ProductName attribute under ProductFields.

Security Hub does not synchronize those two attributes.

" + }, + "CompanyName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the company for the product that generated the finding.

Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings. The exception to this is when you use a custom integration.

When you use the Security Hub console to filter findings by company name, you use this attribute.

When you use the Security Hub API to filter findings by company name, you use the aws/securityhub/CompanyName attribute under ProductFields.

Security Hub does not synchronize those two attributes.

" + }, + "Region":{ + "shape":"NonEmptyString", + "documentation":"

The Region from which the finding was generated.

Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings.

" + }, "GeneratorId":{ "shape":"NonEmptyString", "documentation":"

The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security-findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.

" }, "AwsAccountId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account ID that a finding is generated in.

" + "documentation":"

The Amazon Web Services account ID that a finding is generated in.

" }, "Types":{ "shape":"TypeList", @@ -7244,7 +7625,7 @@ }, "ProductFields":{ "shape":"FieldMap", - "documentation":"

A data type where security-findings providers can include additional solution-specific details that aren't part of the defined AwsSecurityFinding format.

" + "documentation":"

A data type where security-findings providers can include additional solution-specific details that aren't part of the defined AwsSecurityFinding format.

Can contain up to 50 key-value pairs. For each key-value pair, the key can contain up to 128 characters, and the value can contain up to 2048 characters.

" }, "UserDefinedFields":{ "shape":"FieldMap", @@ -7276,7 +7657,7 @@ }, "Compliance":{ "shape":"Compliance", - "documentation":"

This data type is exclusive to findings that are generated as the result of a check run against a specific rule in a supported security standard, such as CIS AWS Foundations. Contains security standard-related finding details.

" + "documentation":"

This data type is exclusive to findings that are generated as the result of a check run against a specific rule in a supported security standard, such as CIS Amazon Web Services Foundations. Contains security standard-related finding details.

" }, "VerificationState":{ "shape":"VerificationState", @@ -7319,7 +7700,7 @@ "documentation":"

In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update their own values for confidence, criticality, related findings, severity, and types.

" } }, - "documentation":"

Provides consistent format for the contents of the Security Hub-aggregated findings. AwsSecurityFinding format enables you to share findings between AWS security services and third-party solutions, and security standards checks.

A finding is a potential security issue generated either by AWS services (Amazon GuardDuty, Amazon Inspector, and Amazon Macie) or by the integrated third-party solutions and standards checks.

" + "documentation":"

Provides consistent format for the contents of the Security Hub-aggregated findings. AwsSecurityFinding format enables you to share findings between Amazon Web Services security services and third-party solutions, and security standards checks.

A finding is a potential security issue generated either by Amazon Web Services services or by the integrated third-party solutions and standards checks.

" }, "AwsSecurityFindingFilters":{ "type":"structure", @@ -7330,7 +7711,7 @@ }, "AwsAccountId":{ "shape":"StringFilterList", - "documentation":"

The AWS account ID that a finding is generated in.

" + "documentation":"

The Amazon Web Services account ID that a finding is generated in.

" }, "Id":{ "shape":"StringFilterList", @@ -7340,6 +7721,10 @@ "shape":"StringFilterList", "documentation":"

The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security-findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.

" }, + "Region":{ + "shape":"StringFilterList", + "documentation":"

The Region from which the finding was generated.

" + }, "Type":{ "shape":"StringFilterList", "documentation":"

A finding type in the format of namespace/category/classifier that classifies a finding.

" @@ -7364,13 +7749,13 @@ "shape":"NumberFilterList", "documentation":"

The native severity as defined by the security-findings provider's solution that generated the finding.

", "deprecated":true, - "deprecatedMessage":"This filter is deprecated, use FindingProviiltersSeverityOriginal instead." + "deprecatedMessage":"This filter is deprecated. Instead, use FindingProviderSeverityOriginal." }, "SeverityNormalized":{ "shape":"NumberFilterList", "documentation":"

The normalized severity of a finding.

", "deprecated":true, - "deprecatedMessage":"This filter is deprecated, use SeverityLabel or FindingProviderFieldsSeverityLabel instead." + "deprecatedMessage":"This filter is deprecated. Instead, use SeverityLabel or FindingProviderFieldsSeverityLabel." }, "SeverityLabel":{ "shape":"StringFilterList", @@ -7406,11 +7791,11 @@ }, "ProductName":{ "shape":"StringFilterList", - "documentation":"

The name of the solution (product) that generates findings.

" + "documentation":"

The name of the solution (product) that generates findings.

Note that this is a filter against the aws/securityhub/ProductName field in ProductFields. It is not a filter for the top-level ProductName field.

" }, "CompanyName":{ "shape":"StringFilterList", - "documentation":"

The name of the findings provider (company) that owns the solution (product) that generates findings.

" + "documentation":"

The name of the findings provider (company) that owns the solution (product) that generates findings.

Note that this is a filter against the aws/securityhub/CompanyName field in ProductFields. It is not a filter for the top-level CompanyName field.

" }, "UserDefinedFields":{ "shape":"MapFilterList", @@ -7534,15 +7919,15 @@ }, "ResourcePartition":{ "shape":"StringFilterList", - "documentation":"

The canonical AWS partition name that the Region is assigned to.

" + "documentation":"

The canonical Amazon Web Services partition name that the Region is assigned to.

" }, "ResourceRegion":{ "shape":"StringFilterList", - "documentation":"

The canonical AWS external Region name where this resource is located.

" + "documentation":"

The canonical Amazon Web Services external Region name where this resource is located.

" }, "ResourceTags":{ "shape":"MapFilterList", - "documentation":"

A list of AWS tags associated with a resource at the time the finding was processed.

" + "documentation":"

A list of Amazon Web Services tags associated with a resource at the time the finding was processed.

" }, "ResourceAwsEc2InstanceType":{ "shape":"StringFilterList", @@ -7590,7 +7975,13 @@ }, "ResourceAwsIamAccessKeyUserName":{ "shape":"StringFilterList", - "documentation":"

The user associated with the IAM access key related to a finding.

" + "documentation":"

The user associated with the IAM access key related to a finding.

", + "deprecated":true, + "deprecatedMessage":"This filter is deprecated. Instead, use ResourceAwsIamAccessKeyPrincipalName." + }, + "ResourceAwsIamAccessKeyPrincipalName":{ + "shape":"StringFilterList", + "documentation":"

The name of the principal that is associated with an IAM access key.

" }, "ResourceAwsIamAccessKeyStatus":{ "shape":"StringFilterList", @@ -7600,6 +7991,10 @@ "shape":"DateFilterList", "documentation":"

The creation date/time of the IAM access key related to a finding.

" }, + "ResourceAwsIamUserUserName":{ + "shape":"StringFilterList", + "documentation":"

The name of an IAM user.

" + }, "ResourceContainerName":{ "shape":"StringFilterList", "documentation":"

The name of the container related to a finding.

" @@ -7622,7 +8017,7 @@ }, "ComplianceStatus":{ "shape":"StringFilterList", - "documentation":"

Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. Contains security standard-related finding details.

" + "documentation":"

Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS Amazon Web Services Foundations. Contains security standard-related finding details.

" }, "VerificationState":{ "shape":"StringFilterList", @@ -7728,11 +8123,11 @@ "members":{ "KmsMasterKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ID of an AWS managed customer master key (CMK) for Amazon SNS or a custom CMK.

" + "documentation":"

The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK.

" }, "Subscription":{ "shape":"AwsSnsTopicSubscriptionList", - "documentation":"

Subscription is an embedded property that describes the subscription endpoints of an Amazon SNS topic.

" + "documentation":"

Subscription is an embedded property that describes the subscription endpoints of an SNS topic.

" }, "TopicName":{ "shape":"NonEmptyString", @@ -7768,11 +8163,11 @@ "members":{ "KmsDataKeyReusePeriodSeconds":{ "shape":"Integer", - "documentation":"

The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.

" + "documentation":"

The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again.

" }, "KmsMasterKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ID of an AWS managed customer master key (CMK) for Amazon SQS or a custom CMK.

" + "documentation":"

The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK.

" }, "QueueName":{ "shape":"NonEmptyString", @@ -7903,14 +8298,14 @@ "documentation":"

A unique identifier for a WebACL.

" } }, - "documentation":"

Details about a WAF WebACL.

" + "documentation":"

Details about a WAF WebACL.

" }, "AwsWafWebAclRule":{ "type":"structure", "members":{ "Action":{ "shape":"WafAction", - "documentation":"

Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule.

" + "documentation":"

Specifies the action that CloudFront or WAF takes when a web request matches the conditions in the rule.

" }, "ExcludedRules":{ "shape":"WafExcludedRuleList", @@ -7933,7 +8328,7 @@ "documentation":"

The rule type.

Valid values: REGULAR | RATE_BASED | GROUP

The default is REGULAR.

" } }, - "documentation":"

Details for a rule in a WAF WebACL.

" + "documentation":"

Details for a rule in a WAF WebACL.

" }, "AwsWafWebAclRuleList":{ "type":"list", @@ -7983,7 +8378,7 @@ "members":{ "Findings":{ "shape":"BatchImportFindingsRequestFindingList", - "documentation":"

A list of findings to import. To successfully import a finding, it must follow the AWS Security Finding Format. Maximum of 100 findings per request.

" + "documentation":"

A list of findings to import. To successfully import a finding, it must follow the Amazon Web Services Security Finding Format. Maximum of 100 findings per request.

" } } }, @@ -8213,7 +8608,7 @@ "members":{ "Status":{ "shape":"ComplianceStatus", - "documentation":"

The result of a standards check.

The valid values for Status are as follows.

    • PASSED - Standards check passed for all evaluated resources.

    • WARNING - Some information is missing or this check is not supported for your configuration.

    • FAILED - Standards check failed for at least one evaluated resource.

    • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the AWS Config evaluation was NOT_APPLICABLE. If the AWS Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the finding.

" + "documentation":"

The result of a standards check.

The valid values for Status are as follows.

    • PASSED - Standards check passed for all evaluated resources.

    • WARNING - Some information is missing or this check is not supported for your configuration.

    • FAILED - Standards check failed for at least one evaluated resource.

    • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the Config evaluation was NOT_APPLICABLE. If the Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the finding.

" }, "RelatedRequirements":{ "shape":"RelatedRequirementsList", @@ -8221,7 +8616,7 @@ }, "StatusReasons":{ "shape":"StatusReasonsList", - "documentation":"

For findings generated from controls, a list of reasons behind the value of Status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the AWS Security Hub User Guide.

" + "documentation":"

For findings generated from controls, a list of reasons behind the value of Status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the Security Hub User Guide.

" } }, "documentation":"

Contains finding details that are specific to control-based findings. Only returned for findings generated from controls.

" @@ -8357,7 +8752,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

The list of AWS accounts that were not processed. For each account, the list includes the account ID and the email address.

" + "documentation":"

The list of Amazon Web Services accounts that were not processed. For each account, the list includes the account ID and the email address.

" } } }, @@ -8497,7 +8892,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

The list of AWS accounts that were not processed. For each account, the list includes the account ID and the email address.

" + "documentation":"

The list of Amazon Web Services accounts that were not processed. For each account, the list includes the account ID and the email address.

" } } }, @@ -8560,7 +8955,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

The list of AWS accounts for which the invitations were not deleted. For each account, the list includes the account ID and the email address.

" + "documentation":"

The list of Amazon Web Services accounts for which the invitations were not deleted. For each account, the list includes the account ID and the email address.

" } } }, @@ -8579,7 +8974,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

The list of AWS accounts that were not deleted. For each account, the list includes the account ID and the email address.

" + "documentation":"

The list of Amazon Web Services accounts that were not deleted. For each account, the list includes the account ID and the email address.

" } } }, @@ -8703,7 +9098,7 @@ "members":{ "StandardsSubscriptionArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of a resource that represents your subscription to a supported standard. To get the subscription ARNs of the standards you have enabled, use the GetEnabledStandards operation.

", + "documentation":"

The ARN of a resource that represents your subscription to a supported standard. To get the subscription ARNs of the standards you have enabled, use the GetEnabledStandards operation.

", "location":"uri", "locationName":"StandardsSubscriptionArn" }, @@ -8787,7 +9182,7 @@ "members":{ "AdminAccountId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account identifier of the Security Hub administrator account.

" + "documentation":"

The Amazon Web Services account identifier of the Security Hub administrator account.

" } } }, @@ -8885,7 +9280,7 @@ "members":{ "AdminAccountId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account identifier of the account to designate as the Security Hub administrator account.

" + "documentation":"

The Amazon Web Services account identifier of the account to designate as the Security Hub administrator account.

" } } }, @@ -9147,7 +9542,7 @@ }, "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

The list of AWS accounts that could not be processed. For each account, the list includes the account ID and the email address.

" + "documentation":"

The list of Amazon Web Services accounts that could not be processed. For each account, the list includes the account ID and the email address.

" } } }, @@ -9344,7 +9739,7 @@ "members":{ "AccountIds":{ "shape":"AccountIdList", - "documentation":"

The list of account IDs of the AWS accounts to invite to Security Hub as members.

" + "documentation":"

The list of account IDs of the Amazon Web Services accounts to invite to Security Hub as members.

" } } }, @@ -9353,7 +9748,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

The list of AWS accounts that could not be processed. For each account, the list includes the account ID and the email address.

" + "documentation":"

The list of Amazon Web Services accounts that could not be processed. For each account, the list includes the account ID and the email address.

" } } }, @@ -9435,7 +9830,7 @@ "Message":{"shape":"NonEmptyString"}, "Code":{"shape":"NonEmptyString"} }, - "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS account or throttling limits. The error code describes the limit exceeded.

", + "documentation":"

The request was rejected because it attempted to create resources beyond the current Amazon Web Services account or throttling limits. The error code describes the limit exceeded.

", "error":{"httpStatusCode":429}, "exception":true }, @@ -9695,7 +10090,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the member account.

" + "documentation":"

The Amazon Web Services account ID of the member account.

" }, "Email":{ "shape":"NonEmptyString", @@ -9703,17 +10098,17 @@ }, "MasterId":{ "shape":"NonEmptyString", - "documentation":"

This is replaced by AdministratorID.

The AWS account ID of the Security Hub administrator account associated with this member account.

", + "documentation":"

This is replaced by AdministratorID.

The Amazon Web Services account ID of the Security Hub administrator account associated with this member account.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use AdministratorId instead." }, "AdministratorId":{ "shape":"NonEmptyString", - "documentation":"

The AWS account ID of the Security Hub administrator account associated with this member account.

" + "documentation":"

The Amazon Web Services account ID of the Security Hub administrator account associated with this member account.

" }, "MemberStatus":{ "shape":"NonEmptyString", - "documentation":"

The status of the relationship between the member account and its administrator account.

The status can have one of the following values:

  • CREATED - Indicates that the administrator account added the member account, but has not yet invited the member account.

  • INVITED - Indicates that the administrator account invited the member account. The member account has not yet responded to the invitation.

  • ENABLED - Indicates that the member account is currently active. For manually invited member accounts, indicates that the member account accepted the invitation.

  • REMOVED - Indicates that the administrator account disassociated the member account.

  • RESIGNED - Indicates that the member account disassociated themselves from the administrator account.

  • DELETED - Indicates that the administrator account deleted the member account.

" + "documentation":"

The status of the relationship between the member account and its administrator account.

The status can have one of the following values:

  • CREATED - Indicates that the administrator account added the member account, but has not yet invited the member account.

  • INVITED - Indicates that the administrator account invited the member account. The member account has not yet responded to the invitation.

  • ENABLED - Indicates that the member account is currently active. For manually invited member accounts, indicates that the member account accepted the invitation.

  • REMOVED - Indicates that the administrator account disassociated the member account.

  • RESIGNED - Indicates that the member account disassociated themselves from the administrator account.

  • DELETED - Indicates that the administrator account deleted the member account.

  • ACCOUNT_SUSPENDED - Indicates that an organization account was suspended from Amazon Web Services at the same time that the administrator account tried to enable the organization account as a member account.

" }, "InvitedAt":{ "shape":"Timestamp", @@ -10186,7 +10581,7 @@ }, "MarketplaceUrl":{ "shape":"NonEmptyString", - "documentation":"

For integrations with AWS services, the AWS Console URL from which to activate the service.

For integrations with third-party products, the AWS Marketplace URL from which to subscribe to or purchase the product.

" + "documentation":"

For integrations with Amazon Web Services services, the Amazon Web Services Console URL from which to activate the service.

For integrations with third-party products, the Marketplace URL from which to subscribe to or purchase the product.

" }, "ActivationUrl":{ "shape":"NonEmptyString", @@ -10326,11 +10721,11 @@ }, "Partition":{ "shape":"Partition", - "documentation":"

The canonical AWS partition name that the Region is assigned to.

" + "documentation":"

The canonical Amazon Web Services partition name that the Region is assigned to.

" }, "Region":{ "shape":"NonEmptyString", - "documentation":"

The canonical AWS external Region name where this resource is located.

" + "documentation":"

The canonical Amazon Web Services external Region name where this resource is located.

" }, "ResourceRole":{ "shape":"NonEmptyString", @@ -10338,7 +10733,7 @@ }, "Tags":{ "shape":"FieldMap", - "documentation":"

A list of AWS tags associated with a resource at the time the finding was processed.

" + "documentation":"

A list of Amazon Web Services tags associated with a resource at the time the finding was processed.

" }, "DataClassification":{ "shape":"DataClassificationDetails", @@ -10374,7 +10769,7 @@ }, "AwsCodeBuildProject":{ "shape":"AwsCodeBuildProjectDetails", - "documentation":"

Details for an AWS CodeBuild project.

" + "documentation":"

Details for a CodeBuild project.

" }, "AwsCloudFrontDistribution":{ "shape":"AwsCloudFrontDistributionDetails", @@ -10386,7 +10781,7 @@ }, "AwsEc2NetworkInterface":{ "shape":"AwsEc2NetworkInterfaceDetails", - "documentation":"

Details for an Amazon EC2 network interface.

" + "documentation":"

Details for an EC2 network interface.

" }, "AwsEc2SecurityGroup":{ "shape":"AwsEc2SecurityGroupDetails", @@ -10406,7 +10801,7 @@ }, "AwsEc2Subnet":{ "shape":"AwsEc2SubnetDetails", - "documentation":"

Details about a subnet in EC2.

" + "documentation":"

Details about a subnet in Amazon EC2.

" }, "AwsEc2NetworkAcl":{ "shape":"AwsEc2NetworkAclDetails", @@ -10426,7 +10821,7 @@ }, "AwsS3Bucket":{ "shape":"AwsS3BucketDetails", - "documentation":"

Details about an Amazon S3 bucket related to a finding.

" + "documentation":"

Details about an S3 bucket related to a finding.

" }, "AwsS3AccountPublicAccessBlock":{ "shape":"AwsS3AccountPublicAccessBlockDetails", @@ -10434,7 +10829,7 @@ }, "AwsS3Object":{ "shape":"AwsS3ObjectDetails", - "documentation":"

Details about an Amazon S3 object related to a finding.

" + "documentation":"

Details about an S3 object related to a finding.

" }, "AwsSecretsManagerSecret":{ "shape":"AwsSecretsManagerSecretDetails", @@ -10482,7 +10877,7 @@ }, "AwsCertificateManagerCertificate":{ "shape":"AwsCertificateManagerCertificateDetails", - "documentation":"

Provides details about an AWS Certificate Manager (ACM) certificate.

" + "documentation":"

Provides details about a Certificate Manager certificate.

" }, "AwsRedshiftCluster":{ "shape":"AwsRedshiftClusterDetails", @@ -10502,7 +10897,7 @@ }, "AwsKmsKey":{ "shape":"AwsKmsKeyDetails", - "documentation":"

Details about a KMS key.

" + "documentation":"

Details about a KMS key.

" }, "AwsLambdaFunction":{ "shape":"AwsLambdaFunctionDetails", @@ -10526,7 +10921,7 @@ }, "AwsWafWebAcl":{ "shape":"AwsWafWebAclDetails", - "documentation":"

Details for a WAF WebACL.

" + "documentation":"

Details for a WAF WebACL.

" }, "AwsRdsDbSnapshot":{ "shape":"AwsRdsDbSnapshotDetails", @@ -10555,6 +10950,14 @@ "Other":{ "shape":"FieldMap", "documentation":"

Details about a resource that are not available in a type-specific details object. Use the Other object in the following cases.

  • The type-specific object does not contain all of the fields that you want to populate. In this case, first use the type-specific object to populate those fields. Use the Other object to populate the fields that are missing from the type-specific object.

  • The resource type does not have a corresponding object. This includes resources for which the type is Other.

" + }, + "AwsRdsEventSubscription":{ + "shape":"AwsRdsEventSubscriptionDetails", + "documentation":"

Details about an RDS event notification subscription.

" + }, + "AwsEcsService":{ + "shape":"AwsEcsServiceDetails", + "documentation":"

Details about a service within an ECS cluster.

" } }, "documentation":"

Additional details about a resource related to a finding.

To provide the details, use the object that corresponds to the resource type. For example, if the resource type is AwsEc2Instance, then you use the AwsEc2Instance object to provide the details.

If the type-specific object does not contain all of the fields you want to populate, then you use the Other object to populate those additional fields.

You also use the Other object to populate the details when the selected type does not have a corresponding object.

" @@ -10578,7 +10981,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

An AWS account ID of the account that was not processed.

" + "documentation":"

An Amazon Web Services account ID of the account that was not processed.

" }, "ProcessingResult":{ "shape":"NonEmptyString", @@ -10644,7 +11047,7 @@ "members":{ "Product":{ "shape":"Double", - "documentation":"

Deprecated. This attribute is being deprecated. Instead of providing Product, provide Original.

The native severity as defined by the AWS service or integrated partner product that generated the finding.

" + "documentation":"

Deprecated. This attribute is being deprecated. Instead of providing Product, provide Original.

The native severity as defined by the Amazon Web Services service or integrated partner product that generated the finding.

" }, "Label":{ "shape":"SeverityLabel", @@ -10689,7 +11092,7 @@ }, "Product":{ "shape":"Double", - "documentation":"

The native severity as defined by the AWS service or integrated partner product that generated the finding.

" + "documentation":"

The native severity as defined by the Amazon Web Services service or integrated partner product that generated the finding.

" }, "Label":{ "shape":"SeverityLabel", @@ -10817,7 +11220,7 @@ }, "SeverityRating":{ "shape":"SeverityRating", - "documentation":"

The severity of findings generated from this security standard control.

The finding severity is based on an assessment of how easy it would be to compromise AWS resources if the issue is detected.

" + "documentation":"

The severity of findings generated from this security standard control.

The finding severity is based on an assessment of how easy it would be to compromise Amazon Web Services resources if the issue is detected.

" }, "RelatedRequirements":{ "shape":"RelatedRequirementsList", @@ -10885,7 +11288,7 @@ "members":{ "StandardsArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the standard that you want to enable. To view the list of available standards and their ARNs, use the DescribeStandards operation.

" + "documentation":"

The ARN of the standard that you want to enable. To view the list of available standards and their ARNs, use the DescribeStandards operation.

" }, "StandardsInput":{ "shape":"StandardsInputParameterMap", @@ -10910,7 +11313,7 @@ "members":{ "ReasonCode":{ "shape":"NonEmptyString", - "documentation":"

A code that represents a reason for the control status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the AWS Security Hub User Guide.

" + "documentation":"

A code that represents a reason for the control status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the Security Hub User Guide.

" }, "Description":{ "shape":"NonEmptyString", @@ -11303,10 +11706,10 @@ "members":{ "Type":{ "shape":"NonEmptyString", - "documentation":"

Specifies how you want AWS WAF to respond to requests that match the settings in a rule.

Valid settings include the following:

  • ALLOW - AWS WAF allows requests

  • BLOCK - AWS WAF blocks requests

  • COUNT - AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.

" + "documentation":"

Specifies how you want WAF to respond to requests that match the settings in a rule.

Valid settings include the following:

  • ALLOW - WAF allows requests

  • BLOCK - WAF blocks requests

  • COUNT - WAF increments a counter of the requests that match all of the conditions in the rule. WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.

" } }, - "documentation":"

Details about the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule.

" + "documentation":"

Details about the action that CloudFront or WAF takes when a web request matches the conditions in the rule.

" }, "WafExcludedRule":{ "type":"structure", @@ -11374,5 +11777,5 @@ "documentation":"

Used to update information about the investigation into the finding.

" } }, - "documentation":"

Security Hub provides you with a comprehensive view of the security state of your AWS environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from AWS accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the AWS Security Hub User Guide .

When you use operations in the Security Hub API, the requests are executed only in the AWS Region that is currently active or in the specific AWS Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to.

For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

The following throttling limits apply to using Security Hub API operations.

  • BatchEnableStandards - RateLimit of 1 request per second, BurstLimit of 1 request per second.

  • GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second.

  • UpdateFindings - RateLimit of 1 request per second. BurstLimit of 5 requests per second.

  • UpdateStandardsControl - RateLimit of 1 request per second, BurstLimit of 5 requests per second.

  • All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

" + "documentation":"

Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from Amazon Web Services accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the Security Hub User Guide.

When you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to.

For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

The following throttling limits apply to using Security Hub API operations.

  • BatchEnableStandards - RateLimit of 1 request per second, BurstLimit of 1 request per second.

  • GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second.

  • UpdateFindings - RateLimit of 1 request per second. BurstLimit of 5 requests per second.

  • UpdateStandardsControl - RateLimit of 1 request per second, BurstLimit of 5 requests per second.

  • All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

" } diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index 25766e7a7c28..20ac44421053 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index 3c3ad64b4bfd..336ac29c992a 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index bb39d0a1a9a2..0ac893b52ab9 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 205964288ca4..fcf48d85bd79 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index 9a94153fe897..107c648530b5 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/ses/pom.xml b/services/ses/pom.xml index 386a9bdd0c8a..98abd2c6090b 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index 111944252283..fd27773e1129 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 060c10296959..ae9610baab2d 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index 2de202ae539f..b5099621db73 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/shield/src/main/resources/codegen-resources/service-2.json b/services/shield/src/main/resources/codegen-resources/service-2.json index c35e4190b55a..f4fce91164b4 100644 --- a/services/shield/src/main/resources/codegen-resources/service-2.json +++ b/services/shield/src/main/resources/codegen-resources/service-2.json @@ -31,7 +31,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Authorizes the DDoS Response Team (DRT) to access the specified Amazon S3 bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 buckets with your subscription.

To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" + "documentation":"

Authorizes the Shield Response Team (SRT) to access the specified Amazon S3 bucket containing log data such as Application Load Balancer access logs, CloudFront logs, or logs from third party sources. You can associate up to 10 Amazon S3 buckets with your subscription.

To use the services of the SRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" }, "AssociateDRTRole":{ "name":"AssociateDRTRole", @@ -49,7 +49,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Authorizes the DDoS Response Team (DRT), using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the DRT to inspect your AWS WAF configuration and create or update AWS WAF rules and web ACLs.

You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to the role you will specify in the request. For more information see Attaching and Detaching IAM Policies. The role must also trust the service principal drt.shield.amazonaws.com. For more information, see IAM JSON Policy Elements: Principal.

The DRT will have access only to your AWS WAF and Shield resources. By submitting this request, you authorize the DRT to inspect your AWS WAF and Shield configuration and create and update AWS WAF rules and web ACLs on your behalf. The DRT takes these actions only if explicitly authorized by you.

You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

To use the services of the DRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" + "documentation":"

Authorizes the Shield Response Team (SRT), using the specified role, to access your Amazon Web Services account to assist with DDoS attack mitigation during potential attacks. This enables the SRT to inspect your WAF configuration and create or update WAF rules and web ACLs.

You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to the role you will specify in the request. For more information see Attaching and Detaching IAM Policies. The role must also trust the service principal drt.shield.amazonaws.com. For more information, see IAM JSON Policy Elements: Principal.

The SRT will have access only to your WAF and Shield resources. By submitting this request, you authorize the SRT to inspect your WAF and Shield configuration and create and update WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you.

You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see Granting a User Permissions to Pass a Role to an Amazon Web Services Service.

To use the services of the SRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" }, "AssociateHealthCheck":{ "name":"AssociateHealthCheck", @@ -66,7 +66,7 @@ {"shape":"InvalidParameterException"}, {"shape":"OptimisticLockException"} ], - "documentation":"

Adds health-based detection to the Shield Advanced protection for a resource. Shield Advanced health-based detection uses the health of your AWS resource to improve responsiveness and accuracy in attack detection and mitigation.

You define the health check in Route 53 and then associate it with your Shield Advanced protection. For more information, see Shield Advanced Health-Based Detection in the AWS WAF and AWS Shield Developer Guide.

" + "documentation":"

Adds health-based detection to the Shield Advanced protection for a resource. Shield Advanced health-based detection uses the health of your Amazon Web Services resource to improve responsiveness and accuracy in attack detection and mitigation.

You define the health check in Route 53 and then associate it with your Shield Advanced protection. For more information, see Shield Advanced Health-Based Detection in the WAF Developer Guide.

" }, "AssociateProactiveEngagementDetails":{ "name":"AssociateProactiveEngagementDetails", @@ -83,7 +83,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OptimisticLockException"} ], - "documentation":"

Initializes proactive engagement and sets the list of contacts for the DDoS Response Team (DRT) to use. You must provide at least one phone number in the emergency contact list.

After you have initialized proactive engagement using this call, to disable or enable proactive engagement, use the calls DisableProactiveEngagement and EnableProactiveEngagement.

This call defines the list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you for escalations to the DRT and to initiate proactive customer support.

The contacts that you provide in the request replace any contacts that were already defined. If you already have contacts defined and want to use them, retrieve the list using DescribeEmergencyContactSettings and then provide it to this call.

" + "documentation":"

Initializes proactive engagement and sets the list of contacts for the Shield Response Team (SRT) to use. You must provide at least one phone number in the emergency contact list.

After you have initialized proactive engagement using this call, to disable or enable proactive engagement, use the calls DisableProactiveEngagement and EnableProactiveEngagement.

This call defines the list of email addresses and phone numbers that the SRT can use to contact you for escalations to the SRT and to initiate proactive customer support.

The contacts that you provide in the request replace any contacts that were already defined. If you already have contacts defined and want to use them, retrieve the list using DescribeEmergencyContactSettings and then provide it to this call.

" }, "CreateProtection":{ "name":"CreateProtection", @@ -103,7 +103,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Enables AWS Shield Advanced for a specific AWS resource. The resource can be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, AWS Global Accelerator accelerator, Elastic IP Address, or an Amazon Route 53 hosted zone.

You can add protection to only a single resource with each CreateProtection request. If you want to add protection to multiple resources at once, use the AWS WAF console. For more information see Getting Started with AWS Shield Advanced and Add AWS Shield Advanced Protection to more AWS Resources.

" + "documentation":"

Enables Shield Advanced for a specific Amazon Web Services resource. The resource can be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, Global Accelerator accelerator, Elastic IP Address, or an Amazon Route 53 hosted zone.

You can add protection to only a single resource with each CreateProtection request. If you want to add protection to multiple resources at once, use the WAF console. For more information see Getting Started with Shield Advanced and Add Shield Advanced Protection to more Amazon Web Services Resources.

" }, "CreateProtectionGroup":{ "name":"CreateProtectionGroup", @@ -135,7 +135,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Activates AWS Shield Advanced for an account.

When you initally create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.

" + "documentation":"

Activates Shield Advanced for an account.

When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.

" }, "DeleteProtection":{ "name":"DeleteProtection", @@ -150,7 +150,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OptimisticLockException"} ], - "documentation":"

Deletes an AWS Shield Advanced Protection.

" + "documentation":"

Deletes a Shield Advanced Protection.

" }, "DeleteProtectionGroup":{ "name":"DeleteProtectionGroup", @@ -180,7 +180,7 @@ {"shape":"LockedSubscriptionException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes AWS Shield Advanced from an account. AWS Shield Advanced requires a 1-year subscription commitment. You cannot delete a subscription prior to the completion of that commitment.

", + "documentation":"

Removes Shield Advanced from an account. Shield Advanced requires a 1-year subscription commitment. You cannot delete a subscription prior to the completion of that commitment.

", "deprecated":true }, "DescribeAttack":{ @@ -208,7 +208,7 @@ "errors":[ {"shape":"InternalErrorException"} ], - "documentation":"

Provides information about the number and type of attacks AWS Shield has detected in the last year for all resources that belong to your account, regardless of whether you've defined Shield protections for them. This operation is available to Shield customers as well as to Shield Advanced customers.

The operation returns data for the time range of midnight UTC, one year ago, to midnight UTC, today. For example, if the current time is 2020-10-26 15:39:32 PDT, equal to 2020-10-26 22:39:32 UTC, then the time range for the attack data returned is from 2019-10-26 00:00:00 UTC to 2020-10-26 00:00:00 UTC.

The time range indicates the period covered by the attack statistics data items.

" + "documentation":"

Provides information about the number and type of attacks Shield has detected in the last year for all resources that belong to your account, regardless of whether you've defined Shield protections for them. This operation is available to Shield customers as well as to Shield Advanced customers.

The operation returns data for the time range of midnight UTC, one year ago, to midnight UTC, today. For example, if the current time is 2020-10-26 15:39:32 PDT, equal to 2020-10-26 22:39:32 UTC, then the time range for the attack data returned is from 2019-10-26 00:00:00 UTC to 2020-10-26 00:00:00 UTC.

The time range indicates the period covered by the attack statistics data items.

" }, "DescribeDRTAccess":{ "name":"DescribeDRTAccess", @@ -222,7 +222,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the current role and list of Amazon S3 log buckets used by the DDoS Response Team (DRT) to access your AWS account while assisting with attack mitigation.

" + "documentation":"

Returns the current role and list of Amazon S3 log buckets used by the Shield Response Team (SRT) to access your Amazon Web Services account while assisting with attack mitigation.

" }, "DescribeEmergencyContactSettings":{ "name":"DescribeEmergencyContactSettings", @@ -236,7 +236,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" + "documentation":"

A list of email addresses and phone numbers that the Shield Response Team (SRT) can use to contact you if you have proactive engagement enabled, for escalations to the SRT and to initiate proactive customer support.

" }, "DescribeProtection":{ "name":"DescribeProtection", @@ -279,7 +279,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Provides details about the AWS Shield Advanced subscription for an account.

" + "documentation":"

Provides details about the Shield Advanced subscription for an account.

" }, "DisableProactiveEngagement":{ "name":"DisableProactiveEngagement", @@ -296,7 +296,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OptimisticLockException"} ], - "documentation":"

Removes authorization from the DDoS Response Team (DRT) to notify contacts about escalations to the DRT and to initiate proactive customer support.

" + "documentation":"

Removes authorization from the Shield Response Team (SRT) to notify contacts about escalations to the SRT and to initiate proactive customer support.

" }, "DisassociateDRTLogBucket":{ "name":"DisassociateDRTLogBucket", @@ -314,7 +314,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes the DDoS Response Team's (DRT) access to the specified Amazon S3 bucket containing your AWS WAF logs.

To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.

" + "documentation":"

Removes the Shield Response Team's (SRT) access to the specified Amazon S3 bucket containing the logs that you shared previously.

To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the SRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.

" }, "DisassociateDRTRole":{ "name":"DisassociateDRTRole", @@ -330,7 +330,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes the DDoS Response Team's (DRT) access to your AWS account.

To make a DisassociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTRole request to remove this access.

" + "documentation":"

Removes the Shield Response Team's (SRT) access to your Amazon Web Services account.

To make a DisassociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the SRT access to your account, you can submit a DisassociateDRTRole request to remove this access.

" }, "DisassociateHealthCheck":{ "name":"DisassociateHealthCheck", @@ -346,7 +346,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OptimisticLockException"} ], - "documentation":"

Removes health-based detection from the Shield Advanced protection for a resource. Shield Advanced health-based detection uses the health of your AWS resource to improve responsiveness and accuracy in attack detection and mitigation.

You define the health check in Route 53 and then associate or disassociate it with your Shield Advanced protection. For more information, see Shield Advanced Health-Based Detection in the AWS WAF and AWS Shield Developer Guide.

" + "documentation":"

Removes health-based detection from the Shield Advanced protection for a resource. Shield Advanced health-based detection uses the health of your Amazon Web Services resource to improve responsiveness and accuracy in attack detection and mitigation.

You define the health check in Route 53 and then associate or disassociate it with your Shield Advanced protection. For more information, see Shield Advanced Health-Based Detection in the WAF Developer Guide.

" }, "EnableProactiveEngagement":{ "name":"EnableProactiveEngagement", @@ -363,7 +363,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OptimisticLockException"} ], - "documentation":"

Authorizes the DDoS Response Team (DRT) to use email and phone to notify contacts about escalations to the DRT and to initiate proactive customer support.

" + "documentation":"

Authorizes the Shield Response Team (SRT) to use email and phone to notify contacts about escalations to the SRT and to initiate proactive customer support.

" }, "GetSubscriptionState":{ "name":"GetSubscriptionState", @@ -451,7 +451,7 @@ {"shape":"InvalidResourceException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS Shield.

" + "documentation":"

Gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in Shield.

" }, "TagResource":{ "name":"TagResource", @@ -467,7 +467,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds or updates tags for a resource in AWS Shield.

" + "documentation":"

Adds or updates tags for a resource in Shield.

" }, "UntagResource":{ "name":"UntagResource", @@ -483,7 +483,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes tags from a resource in AWS Shield.

" + "documentation":"

Removes tags from a resource in Shield.

" }, "UpdateEmergencyContactSettings":{ "name":"UpdateEmergencyContactSettings", @@ -499,7 +499,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the details of the list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" + "documentation":"

Updates the details of the list of email addresses and phone numbers that the Shield Response Team (SRT) can use to contact you if you have proactive engagement enabled, for escalations to the SRT and to initiate proactive customer support.

" }, "UpdateProtectionGroup":{ "name":"UpdateProtectionGroup", @@ -549,7 +549,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

In order to grant the necessary access to the DDoS Response Team (DRT), the user submitting the request must have the iam:PassRole permission. This error indicates the user did not have the appropriate permissions. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

", + "documentation":"

In order to grant the necessary access to the Shield Response Team (SRT), the user submitting the request must have the iam:PassRole permission. This error indicates the user did not have the appropriate permissions. For more information, see Granting a User Permissions to Pass a Role to an Amazon Web Services Service.

", "exception":true }, "AssociateDRTLogBucketRequest":{ @@ -558,7 +558,7 @@ "members":{ "LogBucket":{ "shape":"LogBucket", - "documentation":"

The Amazon S3 bucket that contains your AWS WAF logs.

" + "documentation":"

The Amazon S3 bucket that contains the logs that you want to share.

" } } }, @@ -573,7 +573,7 @@ "members":{ "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the role the DRT will use to access your AWS account.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to this role. For more information see Attaching and Detaching IAM Policies.

" + "documentation":"

The Amazon Resource Name (ARN) of the role the SRT will use to access your Amazon Web Services account.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to this role. For more information see Attaching and Detaching IAM Policies.

" } } }, @@ -610,7 +610,7 @@ "members":{ "EmergencyContactList":{ "shape":"EmergencyContactList", - "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you for escalations to the DRT and to initiate proactive customer support.

To enable proactive engagement, the contact list must include at least one phone number.

The contacts that you provide here replace any contacts that were already defined. If you already have contacts defined and want to use them, retrieve the list using DescribeEmergencyContactSettings and then provide it here.

" + "documentation":"

A list of email addresses and phone numbers that the Shield Response Team (SRT) can use to contact you for escalations to the SRT and to initiate proactive customer support.

To enable proactive engagement, the contact list must include at least one phone number.

The contacts that you provide here replace any contacts that were already defined. If you already have contacts defined and want to use them, retrieve the list using DescribeEmergencyContactSettings and then provide it here.

" } } }, @@ -648,7 +648,7 @@ }, "AttackProperties":{ "shape":"AttackProperties", - "documentation":"

The array of AttackProperty objects.

" + "documentation":"

The array of objects that provide details of the Shield event.

For infrastructure layer events (L3 and L4 events) after January 25, 2021, you can view metrics for top contributors in Amazon CloudWatch metrics. For more information, see Shield metrics and alarms in the WAF Developer Guide.

" }, "Mitigations":{ "shape":"MitigationList", @@ -679,26 +679,26 @@ "members":{ "AttackLayer":{ "shape":"AttackLayer", - "documentation":"

The type of distributed denial of service (DDoS) event that was observed. NETWORK indicates layer 3 and layer 4 events and APPLICATION indicates layer 7 events.

" + "documentation":"

The type of Shield event that was observed. NETWORK indicates layer 3 and layer 4 events and APPLICATION indicates layer 7 events.

For infrastructure layer events (L3 and L4 events) after January 25, 2021, you can view metrics for top contributors in Amazon CloudWatch metrics. For more information, see Shield metrics and alarms in the WAF Developer Guide.

" }, "AttackPropertyIdentifier":{ "shape":"AttackPropertyIdentifier", - "documentation":"

Defines the DDoS attack property information that is provided. The WORDPRESS_PINGBACK_REFLECTOR and WORDPRESS_PINGBACK_SOURCE values are valid only for WordPress reflective pingback DDoS attacks.

" + "documentation":"

Defines the Shield event property information that is provided. The WORDPRESS_PINGBACK_REFLECTOR and WORDPRESS_PINGBACK_SOURCE values are valid only for WordPress reflective pingback events.

" }, "TopContributors":{ "shape":"TopContributors", - "documentation":"

The array of contributor objects that includes the top five contributors to an attack.

" + "documentation":"

Contributor objects for the top five contributors to a Shield event.

" }, "Unit":{ "shape":"Unit", - "documentation":"

The unit of the Value of the contributions.

" + "documentation":"

The unit used for the Contributor Value property.

" }, "Total":{ "shape":"Long", - "documentation":"

The total contributions made to this attack by all contributors, not just the five listed in the TopContributors list.

" + "documentation":"

The total contributions made to this Shield event by all contributors.

" } }, - "documentation":"

Details of the described attack.

" + "documentation":"

Details of a Shield event. This is provided as part of an AttackDetail.

" }, "AttackPropertyIdentifier":{ "type":"string", @@ -848,7 +848,7 @@ }, "Aggregation":{ "shape":"ProtectionGroupAggregation", - "documentation":"

Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include CloudFront distributions and origin resources for CloudFront distributions.

" + "documentation":"

Defines how Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront and origin resources for CloudFront distributions.

" }, "Pattern":{ "shape":"ProtectionGroupPattern", @@ -886,7 +886,7 @@ }, "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

The ARN (Amazon Resource Name) of the resource to be protected.

The ARN should be in one of the following formats:

  • For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id

  • For an Elastic Load Balancer (Classic Load Balancer): arn:aws:elasticloadbalancing:region:account-id:loadbalancer/load-balancer-name

  • For an AWS CloudFront distribution: arn:aws:cloudfront::account-id:distribution/distribution-id

  • For an AWS Global Accelerator accelerator: arn:aws:globalaccelerator::account-id:accelerator/accelerator-id

  • For Amazon Route 53: arn:aws:route53:::hostedzone/hosted-zone-id

  • For an Elastic IP address: arn:aws:ec2:region:account-id:eip-allocation/allocation-id

" + "documentation":"

The ARN (Amazon Resource Name) of the resource to be protected.

The ARN should be in one of the following formats:

  • For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id

  • For an Elastic Load Balancer (Classic Load Balancer): arn:aws:elasticloadbalancing:region:account-id:loadbalancer/load-balancer-name

  • For an Amazon CloudFront distribution: arn:aws:cloudfront::account-id:distribution/distribution-id

  • For a Global Accelerator accelerator: arn:aws:globalaccelerator::account-id:accelerator/accelerator-id

  • For Amazon Route 53: arn:aws:route53:::hostedzone/hosted-zone-id

  • For an Elastic IP address: arn:aws:ec2:region:account-id:eip-allocation/allocation-id

" }, "Tags":{ "shape":"TagList", @@ -1003,11 +1003,11 @@ "members":{ "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the role the DRT used to access your AWS account.

" + "documentation":"

The Amazon Resource Name (ARN) of the role the SRT used to access your Amazon Web Services account.

" }, "LogBucketList":{ "shape":"LogBucketList", - "documentation":"

The list of Amazon S3 buckets accessed by the DRT.

" + "documentation":"

The list of Amazon S3 buckets accessed by the SRT.

" } } }, @@ -1021,7 +1021,7 @@ "members":{ "EmergencyContactList":{ "shape":"EmergencyContactList", - "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" + "documentation":"

A list of email addresses and phone numbers that the Shield Response Team (SRT) can use to contact you if you have proactive engagement enabled, for escalations to the SRT and to initiate proactive customer support.

" } } }, @@ -1041,7 +1041,7 @@ "members":{ "ProtectionGroup":{ "shape":"ProtectionGroup", - "documentation":"

A grouping of protected resources that you and AWS Shield Advanced can monitor as a collective. This resource grouping improves the accuracy of detection and reduces false positives.

" + "documentation":"

A grouping of protected resources that you and Shield Advanced can monitor as a collective. This resource grouping improves the accuracy of detection and reduces false positives.

" } } }, @@ -1054,7 +1054,7 @@ }, "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

The ARN (Amazon Resource Name) of the AWS resource for the Protection object that is described. When submitting the DescribeProtection request you must provide either the ResourceArn or the ProtectionID, but not both.

" + "documentation":"

The ARN (Amazon Resource Name) of the Amazon Web Services resource for the Protection object that is described. When submitting the DescribeProtection request you must provide either the ResourceArn or the ProtectionID, but not both.

" } } }, @@ -1077,7 +1077,7 @@ "members":{ "Subscription":{ "shape":"Subscription", - "documentation":"

The AWS Shield Advanced subscription details for an account.

" + "documentation":"

The Shield Advanced subscription details for an account.

" } } }, @@ -1097,7 +1097,7 @@ "members":{ "LogBucket":{ "shape":"LogBucket", - "documentation":"

The Amazon S3 bucket that contains your AWS WAF logs.

" + "documentation":"

The Amazon S3 bucket that contains the logs that you want to share.

" } } }, @@ -1166,7 +1166,7 @@ "documentation":"

Additional notes regarding the contact.

" } }, - "documentation":"

Contact information that the DRT can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" + "documentation":"

Contact information that the SRT can use to contact you if you have proactive engagement enabled, for escalations to the SRT and to initiate proactive customer support.

" }, "EmergencyContactList":{ "type":"list", @@ -1357,7 +1357,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

If you specify a value for MaxResults and you have more protection groups than the value of MaxResults, AWS Shield Advanced returns this token that you can use in your next request, to get the next batch of objects.

" + "documentation":"

If you specify a value for MaxResults and you have more protection groups than the value of MaxResults, Shield Advanced returns this token that you can use in your next request, to get the next batch of objects.

" } } }, @@ -1383,7 +1383,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

If you specify a value for MaxResults and you have more Protections than the value of MaxResults, AWS Shield Advanced returns a NextToken value in the response that allows you to list another group of Protections. For the second and subsequent ListProtections requests, specify the value of NextToken from the previous response to get information about another batch of Protections.

Shield Advanced might return the list of Protection objects in batches smaller than the number specified by MaxResults. If there are more Protection objects to return, Shield Advanced will always also return a NextToken.

" + "documentation":"

If you specify a value for MaxResults and you have more Protections than the value of MaxResults, Shield Advanced returns a NextToken value in the response that allows you to list another group of Protections. For the second and subsequent ListProtections requests, specify the value of NextToken from the previous response to get information about another batch of Protections.

Shield Advanced might return the list of Protection objects in batches smaller than the number specified by MaxResults. If there are more Protection objects to return, Shield Advanced will always also return a NextToken.

" } } }, @@ -1415,7 +1415,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

If you specify a value for MaxResults and you have more resources in the protection group than the value of MaxResults, AWS Shield Advanced returns this token that you can use in your next request, to get the next batch of objects.

" + "documentation":"

If you specify a value for MaxResults and you have more resources in the protection group than the value of MaxResults, Shield Advanced returns this token that you can use in your next request, to get the next batch of objects.

" } } }, @@ -1533,7 +1533,7 @@ }, "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

The ARN (Amazon Resource Name) of the AWS resource that is protected.

" + "documentation":"

The ARN (Amazon Resource Name) of the Amazon Web Services resource that is protected.

" }, "HealthCheckIds":{ "shape":"HealthCheckIds", @@ -1561,7 +1561,7 @@ }, "Aggregation":{ "shape":"ProtectionGroupAggregation", - "documentation":"

Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include CloudFront distributions and origin resources for CloudFront distributions.

" + "documentation":"

Defines how Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.

" }, "Pattern":{ "shape":"ProtectionGroupPattern", @@ -1580,7 +1580,7 @@ "documentation":"

The ARN (Amazon Resource Name) of the protection group.

" } }, - "documentation":"

A grouping of protected resources that you and AWS Shield Advanced can monitor as a collective. This resource grouping improves the accuracy of detection and reduces false positives.

" + "documentation":"

A grouping of protected resources that you and Shield Advanced can monitor as a collective. This resource grouping improves the accuracy of detection and reduces false positives.

" }, "ProtectionGroupAggregation":{ "type":"string", @@ -1773,7 +1773,7 @@ }, "TimeCommitmentInSeconds":{ "shape":"DurationInSeconds", - "documentation":"

The length, in seconds, of the AWS Shield Advanced subscription for the account.

" + "documentation":"

The length, in seconds, of the Shield Advanced subscription for the account.

" }, "AutoRenew":{ "shape":"AutoRenew", @@ -1785,7 +1785,7 @@ }, "ProactiveEngagementStatus":{ "shape":"ProactiveEngagementStatus", - "documentation":"

If ENABLED, the DDoS Response Team (DRT) will use email and phone to notify contacts about escalations to the DRT and to initiate proactive customer support.

If PENDING, you have requested proactive engagement and the request is pending. The status changes to ENABLED when your request is fully processed.

If DISABLED, the DRT will not proactively notify contacts about escalations or to initiate proactive customer support.

" + "documentation":"

If ENABLED, the Shield Response Team (SRT) will use email and phone to notify contacts about escalations to the SRT and to initiate proactive customer support.

If PENDING, you have requested proactive engagement and the request is pending. The status changes to ENABLED when your request is fully processed.

If DISABLED, the SRT will not proactively notify contacts about escalations or to initiate proactive customer support.

" }, "SubscriptionLimits":{ "shape":"SubscriptionLimits", @@ -1796,7 +1796,7 @@ "documentation":"

The ARN (Amazon Resource Name) of the subscription.

" } }, - "documentation":"

Information about the AWS Shield Advanced subscription for an account.

" + "documentation":"

Information about the Shield Advanced subscription for an account.

" }, "SubscriptionLimits":{ "type":"structure", @@ -1888,7 +1888,7 @@ "documentation":"

Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive.

" } }, - "documentation":"

A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

" + "documentation":"

A tag associated with an Amazon Web Services resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each Amazon Web Services resource, up to 50 tags for a resource.

" }, "TagKey":{ "type":"string", @@ -1995,7 +1995,7 @@ "members":{ "EmergencyContactList":{ "shape":"EmergencyContactList", - "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

If you have proactive engagement enabled, the contact list must include at least one phone number.

" + "documentation":"

A list of email addresses and phone numbers that the Shield Response Team (SRT) can use to contact you if you have proactive engagement enabled, for escalations to the SRT and to initiate proactive customer support.

If you have proactive engagement enabled, the contact list must include at least one phone number.

" } } }, @@ -2018,7 +2018,7 @@ }, "Aggregation":{ "shape":"ProtectionGroupAggregation", - "documentation":"

Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include CloudFront distributions and origin resources for CloudFront distributions.

" + "documentation":"

Defines how Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.

" }, "Pattern":{ "shape":"ProtectionGroupPattern", @@ -2084,5 +2084,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"AWS Shield Advanced

This is the AWS Shield Advanced API Reference. This guide is for developers who need detailed information about the AWS Shield Advanced API actions, data types, and errors. For detailed information about AWS WAF and AWS Shield Advanced features and an overview of how to use the AWS WAF and AWS Shield Advanced APIs, see the AWS WAF and AWS Shield Developer Guide.

" + "documentation":"Shield Advanced

This is the Shield Advanced API Reference. This guide is for developers who need detailed information about the Shield Advanced API actions, data types, and errors. For detailed information about WAF and Shield Advanced features and an overview of how to use the WAF and Shield Advanced APIs, see the WAF and Shield Developer Guide.

" } diff --git a/services/signer/pom.xml b/services/signer/pom.xml index 156e5ad90548..727163ce7c8c 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 0788fd03f628..f76ab7ef2458 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index a378c2849890..8d292b9837aa 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 4073a3a90378..20fdd2097c1d 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index d7569e92223f..306fad2eb8fc 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 974b67a58801..59aa5eea5e14 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssm/src/main/resources/codegen-resources/service-2.json b/services/ssm/src/main/resources/codegen-resources/service-2.json index 1dbe0fa898fd..df25323b26c5 100644 --- 
a/services/ssm/src/main/resources/codegen-resources/service-2.json +++ b/services/ssm/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"TooManyTagsError"}, {"shape":"TooManyUpdates"} ], - "documentation":"

Adds or overwrites one or more tags for the specified resource. Tags are metadata that you can assign to your documents, managed instances, maintenance windows, Parameter Store parameters, and patch baselines. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test.

Each resource can have a maximum of 50 tags.

We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to and are interpreted strictly as a string of characters.

For more information about using tags with EC2 instances, see Tagging your Amazon EC2 resources in the Amazon EC2 User Guide.

" + "documentation":"

Adds or overwrites one or more tags for the specified resource. Tags are metadata that you can assign to your documents, managed instances, maintenance windows, Parameter Store parameters, and patch baselines. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example:

  • Key=Owner,Value=DbAdmin

  • Key=Owner,Value=SysAdmin

  • Key=Owner,Value=Dev

  • Key=Stack,Value=Production

  • Key=Stack,Value=Pre-Production

  • Key=Stack,Value=Test

Each resource can have a maximum of 50 tags.

We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to and are interpreted strictly as a string of characters.

For more information about using tags with Amazon Elastic Compute Cloud (Amazon EC2) instances, see Tagging your Amazon EC2 resources in the Amazon EC2 User Guide.

" }, "AssociateOpsItemRelatedItem":{ "name":"AssociateOpsItemRelatedItem", @@ -45,7 +45,7 @@ {"shape":"OpsItemInvalidParameterException"}, {"shape":"OpsItemRelatedItemAlreadyExistsException"} ], - "documentation":"

Associates a related resource to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager is a capability of AWS Systems Manager.

" + "documentation":"

Associates a related resource to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager.

" }, "CancelCommand":{ "name":"CancelCommand", @@ -75,7 +75,7 @@ {"shape":"InternalServerError"}, {"shape":"DoesNotExistException"} ], - "documentation":"

Stops a maintenance window execution that is already in progress and cancels any tasks in the window that have not already starting running. (Tasks already in progress will continue to completion.)

" + "documentation":"

Stops a maintenance window execution that is already in progress and cancels any tasks in the window that haven't already started running. Tasks already in progress will continue to completion.

" }, "CreateActivation":{ "name":"CreateActivation", @@ -88,7 +88,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Generates an activation code and activation ID you can use to register your on-premises server or virtual machine (VM) with Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises instances and VMs using Systems Manager, see Setting up AWS Systems Manager for hybrid environments in the AWS Systems Manager User Guide.

On-premises servers or VMs that are registered with Systems Manager and EC2 instances that you manage with Systems Manager are all called managed instances.

" + "documentation":"

Generates an activation code and activation ID you can use to register your on-premises server or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises instances and VMs using Systems Manager, see Setting up Amazon Web Services Systems Manager for hybrid environments in the Amazon Web Services Systems Manager User Guide.

On-premises servers or VMs that are registered with Systems Manager and Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage with Systems Manager are all called managed instances.

" }, "CreateAssociation":{ "name":"CreateAssociation", @@ -111,7 +111,7 @@ {"shape":"InvalidTarget"}, {"shape":"InvalidSchedule"} ], - "documentation":"

A State Manager association defines the state that you want to maintain on your instances. For example, an association can specify that anti-virus software must be installed and running on your instances, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an AWS Resource Group or an AWS Autoscaling Group, State Manager applies the configuration when new instances are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software is not installed, then State Manager installs it. If the software is installed, but the service is not running, then the association might instruct State Manager to start the service.

" + "documentation":"

A State Manager association defines the state that you want to maintain on your instances. For example, an association can specify that anti-virus software must be installed and running on your instances, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager, applies the configuration when new instances are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service.

" }, "CreateAssociationBatch":{ "name":"CreateAssociationBatch", @@ -134,7 +134,7 @@ {"shape":"InvalidTarget"}, {"shape":"InvalidSchedule"} ], - "documentation":"

Associates the specified Systems Manager document with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, SSM Agent running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system returns the AssociationAlreadyExists exception.

" + "documentation":"

Associates the specified Amazon Web Services Systems Manager document (SSM document) with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, Amazon Web Services Systems Manager Agent (SSM Agent) running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system returns the AssociationAlreadyExists exception.

" }, "CreateDocument":{ "name":"CreateDocument", @@ -152,7 +152,7 @@ {"shape":"DocumentLimitExceeded"}, {"shape":"InvalidDocumentSchemaVersion"} ], - "documentation":"

Creates a Systems Manager (SSM) document. An SSM document defines the actions that Systems Manager performs on your managed instances. For more information about SSM documents, including information about supported schemas, features, and syntax, see AWS Systems Manager Documents in the AWS Systems Manager User Guide.

" + "documentation":"

Creates an Amazon Web Services Systems Manager (SSM) document. An SSM document defines the actions that Systems Manager performs on your managed instances. For more information about SSM documents, including information about supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the Amazon Web Services Systems Manager User Guide.

" }, "CreateMaintenanceWindow":{ "name":"CreateMaintenanceWindow", @@ -183,7 +183,7 @@ {"shape":"OpsItemLimitExceededException"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"

Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Creates a new OpsItem. You must have permission in Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide.

Operations engineers and IT professionals use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their Amazon Web Services resources. For more information, see Amazon Web Services Systems Manager OpsCenter in the Amazon Web Services Systems Manager User Guide.

" }, "CreateOpsMetadata":{ "name":"CreateOpsMetadata", @@ -200,7 +200,7 @@ {"shape":"OpsMetadataLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

If you create a new application in Application Manager, Systems Manager calls this API action to specify information about the new application, including the application type.

" + "documentation":"

If you create a new application in Application Manager, Amazon Web Services Systems Manager calls this API operation to specify information about the new application, including the application type.

" }, "CreatePatchBaseline":{ "name":"CreatePatchBaseline", @@ -215,7 +215,7 @@ {"shape":"ResourceLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a patch baseline.

For information about valid key and value pairs in PatchFilters for each supported operating system type, see PatchFilter.

" + "documentation":"

Creates a patch baseline.

For information about valid key-value pairs in PatchFilters for each supported operating system type, see PatchFilter.

" }, "CreateResourceDataSync":{ "name":"CreateResourceDataSync", @@ -231,7 +231,7 @@ {"shape":"ResourceDataSyncAlreadyExistsException"}, {"shape":"ResourceDataSyncInvalidConfigurationException"} ], - "documentation":"

A resource data sync helps you view data from multiple sources in a single location. Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource.

You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple AWS Regions to a single S3 bucket. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.

You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple AWS Regions to a single S3 bucket. This type can synchronize OpsItems and OpsData from multiple AWS accounts and Regions or EntireOrganization by using AWS Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the AWS Systems Manager User Guide.

A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.

" + "documentation":"

A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource.

You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Configuring resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide.

You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide.

A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.

By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.

" }, "DeleteActivation":{ "name":"DeleteActivation", @@ -247,7 +247,7 @@ {"shape":"InternalServerError"}, {"shape":"TooManyUpdates"} ], - "documentation":"

Deletes an activation. You are not required to delete an activation. If you delete an activation, you can no longer use it to register additional managed instances. Deleting an activation does not de-register managed instances. You must manually de-register managed instances.

" + "documentation":"

Deletes an activation. You aren't required to delete an activation. If you delete an activation, you can no longer use it to register additional managed instances. Deleting an activation doesn't de-register managed instances. You must manually de-register managed instances.

" }, "DeleteAssociation":{ "name":"DeleteAssociation", @@ -264,7 +264,7 @@ {"shape":"InvalidInstanceId"}, {"shape":"TooManyUpdates"} ], - "documentation":"

Disassociates the specified Systems Manager document from the specified instance.

When you disassociate a document from an instance, it does not change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.

" + "documentation":"

Disassociates the specified Amazon Web Services Systems Manager document (SSM document) from the specified instance. If you created the association by using the Targets parameter, then you must delete the association by using the association ID.

When you disassociate a document from an instance, it doesn't change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.

" }, "DeleteDocument":{ "name":"DeleteDocument", @@ -280,7 +280,7 @@ {"shape":"InvalidDocumentOperation"}, {"shape":"AssociatedInstances"} ], - "documentation":"

Deletes the Systems Manager document and all instance associations to the document.

Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.

" + "documentation":"

Deletes the Amazon Web Services Systems Manager document (SSM document) and all instance associations to the document.

Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.

" }, "DeleteInventory":{ "name":"DeleteInventory", @@ -339,7 +339,7 @@ {"shape":"InternalServerError"}, {"shape":"ParameterNotFound"} ], - "documentation":"

Delete a parameter from the system.

" + "documentation":"

Delete a parameter from the system. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

" }, "DeleteParameters":{ "name":"DeleteParameters", @@ -352,7 +352,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Delete a list of parameters.

" + "documentation":"

Delete a list of parameters. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

" }, "DeletePatchBaseline":{ "name":"DeletePatchBaseline", @@ -381,7 +381,7 @@ {"shape":"ResourceDataSyncNotFoundException"}, {"shape":"ResourceDataSyncInvalidConfigurationException"} ], - "documentation":"

Deletes a Resource Data Sync configuration. After the configuration is deleted, changes to data on managed instances are no longer synced to or from the target. Deleting a sync configuration does not delete data.

" + "documentation":"

Deletes a resource data sync configuration. After the configuration is deleted, changes to data on managed instances are no longer synced to or from the target. Deleting a sync configuration doesn't delete data.

" }, "DeregisterManagedInstance":{ "name":"DeregisterManagedInstance", @@ -453,7 +453,7 @@ {"shape":"InvalidNextToken"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes details about the activation, such as the date and time the activation was created, its expiration date, the IAM role assigned to the instances in the activation, and the number of instances registered by using this activation.

" + "documentation":"

Describes details about the activation, such as the date and time the activation was created, its expiration date, the Identity and Access Management (IAM) role assigned to the instances in the activation, and the number of instances registered by using this activation.

" }, "DescribeAssociation":{ "name":"DescribeAssociation", @@ -470,7 +470,7 @@ {"shape":"InvalidDocument"}, {"shape":"InvalidInstanceId"} ], - "documentation":"

Describes the association for the specified target or instance. If you created the association by using the Targets parameter, then you must retrieve the association by using the association ID. If you created the association by specifying an instance ID and a Systems Manager document, then you retrieve the association by specifying the document name and the instance ID.

" + "documentation":"

Describes the association for the specified target or instance. If you created the association by using the Targets parameter, then you must retrieve the association by using the association ID.

" }, "DescribeAssociationExecutionTargets":{ "name":"DescribeAssociationExecutionTargets", @@ -486,7 +486,7 @@ {"shape":"InvalidNextToken"}, {"shape":"AssociationExecutionDoesNotExist"} ], - "documentation":"

Use this API action to view information about a specific execution of a specific association.

" + "documentation":"

Views information about a specific execution of a specific association.

" }, "DescribeAssociationExecutions":{ "name":"DescribeAssociationExecutions", @@ -501,7 +501,7 @@ {"shape":"AssociationDoesNotExist"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Use this API action to view all executions for a specific association ID.

" + "documentation":"

Views all executions for a specific association ID.

" }, "DescribeAutomationExecutions":{ "name":"DescribeAutomationExecutions", @@ -562,7 +562,7 @@ {"shape":"InvalidDocument"}, {"shape":"InvalidDocumentVersion"} ], - "documentation":"

Describes the specified Systems Manager document.

" + "documentation":"

Describes the specified Amazon Web Services Systems Manager document (SSM document).

" }, "DescribeDocumentPermission":{ "name":"DescribeDocumentPermission", @@ -579,7 +579,7 @@ {"shape":"InvalidPermissionType"}, {"shape":"InvalidDocumentOperation"} ], - "documentation":"

Describes the permissions for a Systems Manager document. If you created the document, you are the owner. If a document is shared, it can either be shared privately (by specifying a user's AWS account ID) or publicly (All).

" + "documentation":"

Describes the permissions for an Amazon Web Services Systems Manager document (SSM document). If you created the document, you are the owner. If a document is shared, it can either be shared privately (by specifying a user's Amazon Web Services account ID) or publicly (All).

" }, "DescribeEffectiveInstanceAssociations":{ "name":"DescribeEffectiveInstanceAssociations", @@ -610,7 +610,7 @@ {"shape":"UnsupportedOperatingSystem"}, {"shape":"InternalServerError"} ], - "documentation":"

Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline. Note that this API applies only to Windows patch baselines.

" + "documentation":"

Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline. Applies to patch baselines for Windows only.

" }, "DescribeInstanceAssociationsStatus":{ "name":"DescribeInstanceAssociationsStatus", @@ -642,7 +642,7 @@ {"shape":"InvalidInstanceInformationFilterValue"}, {"shape":"InvalidFilterKey"} ], - "documentation":"

Describes one or more of your instances, including information about the operating system platform, the version of SSM Agent installed on the instance, instance status, and so on.

If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.

The IamRole field for this API action is the Amazon Identity and Access Management (IAM) role assigned to on-premises instances. This call does not return the IAM role for EC2 instances.

" + "documentation":"

Describes one or more of your instances, including information about the operating system platform, the version of SSM Agent installed on the instance, instance status, and so on.

If you specify one or more instance IDs, it returns information for those instances. If you don't specify instance IDs, it returns information for all your instances. If you specify an instance ID that isn't valid or an instance that you don't own, you receive an error.

The IamRole field for this API operation is the Identity and Access Management (IAM) role assigned to on-premises instances. This call doesn't return the IAM role for EC2 instances.

" }, "DescribeInstancePatchStates":{ "name":"DescribeInstancePatchStates", @@ -785,7 +785,7 @@ {"shape":"DoesNotExistException"}, {"shape":"InternalServerError"} ], - "documentation":"

Lists the tasks in a maintenance window.

For maintenance window tasks without a specified target, you cannot supply values for --max-errors and --max-concurrency. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. These values do not affect the running of your task and can be ignored.

" + "documentation":"

Lists the tasks in a maintenance window.

For maintenance window tasks without a specified target, you can't supply values for --max-errors and --max-concurrency. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. These values don't affect the running of your task and can be ignored.

" }, "DescribeMaintenanceWindows":{ "name":"DescribeMaintenanceWindows", @@ -798,7 +798,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Retrieves the maintenance windows in an AWS account.

" + "documentation":"

Retrieves the maintenance windows in an Amazon Web Services account.

" }, "DescribeMaintenanceWindowsForTarget":{ "name":"DescribeMaintenanceWindowsForTarget", @@ -824,7 +824,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Query a set of OpsItems. You must have permission in Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide.

Operations engineers and IT professionals use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their Amazon Web Services resources. For more information, see OpsCenter in the Amazon Web Services Systems Manager User Guide.

" }, "DescribeParameters":{ "name":"DescribeParameters", @@ -841,7 +841,7 @@ {"shape":"InvalidFilterValue"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Get information about a parameter.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

" + "documentation":"

Get information about a parameter.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

" }, "DescribePatchBaselines":{ "name":"DescribePatchBaselines", @@ -854,7 +854,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists the patch baselines in your AWS account.

" + "documentation":"

Lists the patch baselines in your Amazon Web Services account.

" }, "DescribePatchGroupState":{ "name":"DescribePatchGroupState", @@ -868,7 +868,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Returns high-level aggregated patch compliance state for a patch group.

" + "documentation":"

Returns high-level aggregated patch compliance state information for a patch group.

" }, "DescribePatchGroups":{ "name":"DescribePatchGroups", @@ -894,7 +894,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists the properties of available patches organized by product, product family, classification, severity, and other properties of available patches. You can use the reported properties in the filters you specify in requests for actions such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines.

The following section lists the properties that can be used in filters for each major operating system type:

AMAZON_LINUX

Valid properties: PRODUCT, CLASSIFICATION, SEVERITY

AMAZON_LINUX_2

Valid properties: PRODUCT, CLASSIFICATION, SEVERITY

CENTOS

Valid properties: PRODUCT, CLASSIFICATION, SEVERITY

DEBIAN

Valid properties: PRODUCT, PRIORITY

MACOS

Valid properties: PRODUCT, CLASSIFICATION

ORACLE_LINUX

Valid properties: PRODUCT, CLASSIFICATION, SEVERITY

REDHAT_ENTERPRISE_LINUX

Valid properties: PRODUCT, CLASSIFICATION, SEVERITY

SUSE

Valid properties: PRODUCT, CLASSIFICATION, SEVERITY

UBUNTU

Valid properties: PRODUCT, PRIORITY

WINDOWS

Valid properties: PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, MSRC_SEVERITY

" + "documentation":"

Lists the properties of available patches organized by product, product family, classification, severity, and other properties of available patches. You can use the reported properties in the filters you specify in requests for operations such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and DescribePatchBaselines.

The following section lists the properties that can be used in filters for each major operating system type:

AMAZON_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

AMAZON_LINUX_2

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

CENTOS

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

DEBIAN

Valid properties: PRODUCT | PRIORITY

MACOS

Valid properties: PRODUCT | CLASSIFICATION

ORACLE_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

REDHAT_ENTERPRISE_LINUX

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

SUSE

Valid properties: PRODUCT | CLASSIFICATION | SEVERITY

UBUNTU

Valid properties: PRODUCT | PRIORITY

WINDOWS

Valid properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | MSRC_SEVERITY

" }, "DescribeSessions":{ "name":"DescribeSessions", @@ -925,7 +925,7 @@ {"shape":"OpsItemNotFoundException"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"

Deletes the association between an OpsItem and a related resource. For example, this API action can delete an Incident Manager incident from an OpsItem. Incident Manager is a capability of AWS Systems Manager.

" + "documentation":"

Deletes the association between an OpsItem and a related resource. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager.

" }, "GetAutomationExecution":{ "name":"GetAutomationExecution", @@ -955,7 +955,7 @@ {"shape":"InvalidDocumentType"}, {"shape":"UnsupportedCalendarException"} ], - "documentation":"

Gets the state of the AWS Systems Manager Change Calendar at an optional, specified time. If you specify a time, GetCalendarState returns the state of the calendar at a specific time, and returns the next time that the Change Calendar state will transition. If you do not specify a time, GetCalendarState assumes the current time. Change Calendar entries have two possible states: OPEN or CLOSED.

If you specify more than one calendar in a request, the command returns the status of OPEN only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED.

For more information about Systems Manager Change Calendar, see AWS Systems Manager Change Calendar in the AWS Systems Manager User Guide.

" + "documentation":"

Gets the state of an Amazon Web Services Systems Manager change calendar at the current time or a specified time. If you specify a time, GetCalendarState returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState uses the current time. Change Calendar entries have two possible states: OPEN or CLOSED.

If you specify more than one calendar in a request, the command returns the status of OPEN only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED.

For more information about Change Calendar, a capability of Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide.

" }, "GetCommandInvocation":{ "name":"GetCommandInvocation", @@ -998,7 +998,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Retrieves the default patch baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

If you do not specify an operating system value, the default patch baseline for Windows is returned.

" + "documentation":"

Retrieves the default patch baseline. Amazon Web Services Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

If you don't specify an operating system value, the default patch baseline for Windows is returned.

" }, "GetDeployablePatchSnapshotForInstance":{ "name":"GetDeployablePatchSnapshotForInstance", @@ -1013,7 +1013,7 @@ {"shape":"UnsupportedOperatingSystem"}, {"shape":"UnsupportedFeatureRequiredException"} ], - "documentation":"

Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document.

" + "documentation":"

Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document).

If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of Amazon Web Services Systems Manager, with an SSM document that enables you to target an instance with a script or command. For example, run the command using the AWS-RunShellScript document or the AWS-RunPowerShellScript document.

" }, "GetDocument":{ "name":"GetDocument", @@ -1028,7 +1028,7 @@ {"shape":"InvalidDocument"}, {"shape":"InvalidDocumentVersion"} ], - "documentation":"

Gets the contents of the specified Systems Manager document.

" + "documentation":"

Gets the contents of the specified Amazon Web Services Systems Manager document (SSM document).

" }, "GetInventory":{ "name":"GetInventory", @@ -1047,7 +1047,7 @@ {"shape":"InvalidAggregatorException"}, {"shape":"InvalidResultAttributeException"} ], - "documentation":"

Query inventory information.

" + "documentation":"

Query inventory information. This includes instance status, such as Stopped or Terminated.

" }, "GetInventorySchema":{ "name":"GetInventorySchema", @@ -1132,7 +1132,7 @@ {"shape":"DoesNotExistException"}, {"shape":"InternalServerError"} ], - "documentation":"

Lists the tasks in a maintenance window.

For maintenance window tasks without a specified target, you cannot supply values for --max-errors and --max-concurrency. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. These values do not affect the running of your task and can be ignored.

" + "documentation":"

Lists the tasks in a maintenance window.

For maintenance window tasks without a specified target, you can't supply values for --max-errors and --max-concurrency. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. These values don't affect the running of your task and can be ignored.

" }, "GetOpsItem":{ "name":"GetOpsItem", @@ -1146,7 +1146,7 @@ {"shape":"InternalServerError"}, {"shape":"OpsItemNotFoundException"} ], - "documentation":"

Get information about an OpsItem by using the ID. You must have permission in AWS Identity and Access Management (IAM) to view information about an OpsItem. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Get information about an OpsItem by using the ID. You must have permission in Identity and Access Management (IAM) to view information about an OpsItem. For more information, see Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide.

Operations engineers and IT professionals use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their Amazon Web Services resources. For more information, see OpsCenter in the Amazon Web Services Systems Manager User Guide.

" }, "GetOpsMetadata":{ "name":"GetOpsMetadata", @@ -1179,7 +1179,7 @@ {"shape":"InvalidTypeNameException"}, {"shape":"InvalidAggregatorException"} ], - "documentation":"

View a summary of OpsItems based on specified filters and aggregators.

" + "documentation":"

View a summary of operations metadata (OpsData) based on specified filters and aggregators. OpsData can include information about Amazon Web Services Systems Manager OpsCenter operational workitems (OpsItems) as well as information about any Amazon Web Services resource or service configured to report OpsData to Amazon Web Services Systems Manager Explorer.

" }, "GetParameter":{ "name":"GetParameter", @@ -1195,7 +1195,7 @@ {"shape":"ParameterNotFound"}, {"shape":"ParameterVersionNotFound"} ], - "documentation":"

Get information about a parameter by using the parameter name. Don't confuse this API action with the GetParameters API action.

" + "documentation":"

Get information about a single parameter by specifying the parameter name.

To get information about more than one parameter at a time, use the GetParameters operation.

" }, "GetParameterHistory":{ "name":"GetParameterHistory", @@ -1225,7 +1225,7 @@ {"shape":"InvalidKeyId"}, {"shape":"InternalServerError"} ], - "documentation":"

Get details of a parameter. Don't confuse this API action with the GetParameter API action.

" + "documentation":"

Get information about one or more parameters by specifying multiple parameter names.

To get information about a single parameter, you can use the GetParameter operation instead.

" }, "GetParametersByPath":{ "name":"GetParametersByPath", @@ -1243,7 +1243,7 @@ {"shape":"InvalidKeyId"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Retrieve information about one or more parameters in a specific hierarchy.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

" + "documentation":"

Retrieve information about one or more parameters in a specific hierarchy.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

" }, "GetPatchBaseline":{ "name":"GetPatchBaseline", @@ -1285,7 +1285,7 @@ {"shape":"InternalServerError"}, {"shape":"ServiceSettingNotFound"} ], - "documentation":"

ServiceSetting is an account-level setting for an AWS service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an AWS service charges money to the account based on feature or service usage, then the AWS service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. AWS services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the UpdateServiceSetting API action to change the default setting. Or use the ResetServiceSetting to change the value back to the original value defined by the AWS service team.

Query the current service setting for the account.

" + "documentation":"

ServiceSetting is an account-level setting for an Amazon Web Services service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an Amazon Web Services service charges money to the account based on feature or service usage, then the Amazon Web Services service team might create a default setting of false. This means the user can't use this feature unless they change the setting to true and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. Amazon Web Services services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the UpdateServiceSetting API operation to change the default setting. Or use the ResetServiceSetting to change the value back to the original value defined by the Amazon Web Services service team.

Query the current service setting for the Amazon Web Services account.

" }, "LabelParameterVersion":{ "name":"LabelParameterVersion", @@ -1302,7 +1302,7 @@ {"shape":"ParameterVersionNotFound"}, {"shape":"ParameterVersionLabelLimitExceeded"} ], - "documentation":"

A parameter label is a user-defined alias to help you manage different versions of a parameter. When you modify a parameter, Systems Manager automatically saves a new version and increments the version number by one. A label can help you remember the purpose of a parameter when there are multiple versions.

Parameter labels have the following requirements and restrictions.

  • A version of a parameter can have a maximum of 10 labels.

  • You can't attach the same label to different versions of the same parameter. For example, if version 1 has the label Production, then you can't attach Production to version 2.

  • You can move a label from one version of a parameter to another.

  • You can't create a label when you create a new parameter. You must attach a label to a specific version of a parameter.

  • If you no longer want to use a parameter label, then you can either delete it or move it to a different version of a parameter.

  • A label can have a maximum of 100 characters.

  • Labels can contain letters (case sensitive), numbers, periods (.), hyphens (-), or underscores (_).

  • Labels can't begin with a number, \"aws,\" or \"ssm\" (not case sensitive). If a label fails to meet these requirements, then the label is not associated with a parameter and the system displays it in the list of InvalidLabels.

" + "documentation":"

A parameter label is a user-defined alias to help you manage different versions of a parameter. When you modify a parameter, Amazon Web Services Systems Manager automatically saves a new version and increments the version number by one. A label can help you remember the purpose of a parameter when there are multiple versions.

Parameter labels have the following requirements and restrictions.

  • A version of a parameter can have a maximum of 10 labels.

  • You can't attach the same label to different versions of the same parameter. For example, if version 1 has the label Production, then you can't attach Production to version 2.

  • You can move a label from one version of a parameter to another.

  • You can't create a label when you create a new parameter. You must attach a label to a specific version of a parameter.

  • If you no longer want to use a parameter label, then you can either delete it or move it to a different version of a parameter.

  • A label can have a maximum of 100 characters.

  • Labels can contain letters (case sensitive), numbers, periods (.), hyphens (-), or underscores (_).

  • Labels can't begin with a number, \"aws\" or \"ssm\" (not case sensitive). If a label fails to meet these requirements, then the label isn't associated with a parameter and the system displays it in the list of InvalidLabels.

" }, "ListAssociationVersions":{ "name":"ListAssociationVersions", @@ -1331,7 +1331,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Returns all State Manager associations in the current AWS account and Region. You can limit the results to a specific State Manager association document or instance by specifying a filter.

" + "documentation":"

Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results to a specific State Manager association document or instance by specifying a filter. State Manager is a capability of Amazon Web Services Systems Manager.

" }, "ListCommandInvocations":{ "name":"ListCommandInvocations", @@ -1348,7 +1348,7 @@ {"shape":"InvalidFilterKey"}, {"shape":"InvalidNextToken"} ], - "documentation":"

An invocation is copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user runs SendCommand against three instances, then a command invocation is created for each requested instance ID. ListCommandInvocations provide status about command execution.

" + "documentation":"

An invocation is a copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user runs SendCommand against three instances, then a command invocation is created for each requested instance ID. ListCommandInvocations provides status about command execution.

" }, "ListCommands":{ "name":"ListCommands", @@ -1365,7 +1365,7 @@ {"shape":"InvalidFilterKey"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Lists the commands requested by users of the AWS account.

" + "documentation":"

Lists the commands requested by users of the Amazon Web Services account.

" }, "ListComplianceItems":{ "name":"ListComplianceItems", @@ -1382,7 +1382,7 @@ {"shape":"InvalidFilter"}, {"shape":"InvalidNextToken"} ], - "documentation":"

For a specified resource ID, this API action returns a list of compliance statuses for different resource types. Currently, you can only specify one resource ID per call. List results depend on the criteria specified in the filter.

" + "documentation":"

For a specified resource ID, this API operation returns a list of compliance statuses for different resource types. Currently, you can only specify one resource ID per call. List results depend on the criteria specified in the filter.

" }, "ListComplianceSummaries":{ "name":"ListComplianceSummaries", @@ -1413,7 +1413,7 @@ {"shape":"InvalidDocumentVersion"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Information about approval reviews for a version of an SSM document.

" + "documentation":"

Information about approval reviews for a version of a change template in Change Manager.

" }, "ListDocumentVersions":{ "name":"ListDocumentVersions", @@ -1443,7 +1443,7 @@ {"shape":"InvalidNextToken"}, {"shape":"InvalidFilterKey"} ], - "documentation":"

Returns all Systems Manager (SSM) documents in the current AWS account and Region. You can limit the results of this request by using a filter.

" + "documentation":"

Returns all Systems Manager (SSM) documents in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results of this request by using a filter.

" }, "ListInventoryEntries":{ "name":"ListInventoryEntries", @@ -1476,7 +1476,7 @@ {"shape":"OpsItemLimitExceededException"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"

Returns a list of all OpsItem events in the current AWS account and Region. You can limit the results to events associated with specific OpsItems by specifying a filter.

" + "documentation":"

Returns a list of all OpsItem events in the current Amazon Web Services Region and Amazon Web Services account. You can limit the results to events associated with specific OpsItems by specifying a filter.

" }, "ListOpsItemRelatedItems":{ "name":"ListOpsItemRelatedItems", @@ -1504,7 +1504,7 @@ {"shape":"OpsMetadataInvalidArgumentException"}, {"shape":"InternalServerError"} ], - "documentation":"

Systems Manager calls this API action when displaying all Application Manager OpsMetadata objects or blobs.

" + "documentation":"

Amazon Web Services Systems Manager calls this API operation when displaying all Application Manager OpsMetadata objects or blobs.

" }, "ListResourceComplianceSummaries":{ "name":"ListResourceComplianceSummaries", @@ -1549,7 +1549,7 @@ {"shape":"InvalidResourceId"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns a list of the tags assigned to the specified resource.

" + "documentation":"

Returns a list of the tags assigned to the specified resource.

For information about the ID format for each supported resource type, see AddTagsToResource.

" }, "ModifyDocumentPermission":{ "name":"ModifyDocumentPermission", @@ -1566,7 +1566,7 @@ {"shape":"DocumentPermissionLimit"}, {"shape":"DocumentLimitExceeded"} ], - "documentation":"

Shares a Systems Manager document publicly or privately. If you share a document privately, you must specify the AWS user account IDs for those people who can use the document. If you share a document publicly, you must specify All as the account ID.

" + "documentation":"

Shares an Amazon Web Services Systems Manager document (SSM document) publicly or privately. If you share a document privately, you must specify the Amazon Web Services user account IDs for those people who can use the document. If you share a document publicly, you must specify All as the account ID.

" }, "PutComplianceItems":{ "name":"PutComplianceItems", @@ -1585,7 +1585,7 @@ {"shape":"InvalidResourceType"}, {"shape":"InvalidResourceId"} ], - "documentation":"

Registers a compliance type and other compliance details on a designated resource. This action lets you register custom compliance details with a resource. This call overwrites existing compliance information on the resource, so you must provide a full list of compliance items each time that you send the request.

ComplianceType can be one of the following:

  • ExecutionId: The execution ID when the patch, association, or custom compliance item was applied.

  • ExecutionType: Specify patch, association, or Custom:string.

  • ExecutionTime. The time the patch, association, or custom compliance item was applied to the instance.

  • Id: The patch, association, or custom compliance ID.

  • Title: A title.

  • Status: The status of the compliance item. For example, approved for patches, or Failed for associations.

  • Severity: A patch severity. For example, critical.

  • DocumentName: A SSM document name. For example, AWS-RunPatchBaseline.

  • DocumentVersion: An SSM document version number. For example, 4.

  • Classification: A patch classification. For example, security updates.

  • PatchBaselineId: A patch baseline ID.

  • PatchSeverity: A patch severity. For example, Critical.

  • PatchState: A patch state. For example, InstancesWithFailedPatches.

  • PatchGroup: The name of a patch group.

  • InstalledTime: The time the association, patch, or custom compliance item was applied to the resource. Specify the time by using the following format: yyyy-MM-dd'T'HH:mm:ss'Z'

" + "documentation":"

Registers a compliance type and other compliance details on a designated resource. This operation lets you register custom compliance details with a resource. This call overwrites existing compliance information on the resource, so you must provide a full list of compliance items each time that you send the request.

ComplianceType can be one of the following:

  • ExecutionId: The execution ID when the patch, association, or custom compliance item was applied.

  • ExecutionType: Specify patch, association, or Custom:string.

  • ExecutionTime. The time the patch, association, or custom compliance item was applied to the instance.

  • Id: The patch, association, or custom compliance ID.

  • Title: A title.

  • Status: The status of the compliance item. For example, approved for patches, or Failed for associations.

  • Severity: A patch severity. For example, critical.

  • DocumentName: An SSM document name. For example, AWS-RunPatchBaseline.

  • DocumentVersion: An SSM document version number. For example, 4.

  • Classification: A patch classification. For example, security updates.

  • PatchBaselineId: A patch baseline ID.

  • PatchSeverity: A patch severity. For example, Critical.

  • PatchState: A patch state. For example, InstancesWithFailedPatches.

  • PatchGroup: The name of a patch group.

  • InstalledTime: The time the association, patch, or custom compliance item was applied to the resource. Specify the time by using the following format: yyyy-MM-dd'T'HH:mm:ss'Z'

" }, "PutInventory":{ "name":"PutInventory", @@ -1651,7 +1651,7 @@ {"shape":"DoesNotExistException"}, {"shape":"InternalServerError"} ], - "documentation":"

Defines the default patch baseline for the relevant operating system.

To reset the AWS predefined patch baseline as the default, specify the full patch baseline ARN as the baseline ID value. For example, for CentOS, specify arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed instead of pb-0574b43a65ea646ed.

" + "documentation":"

Defines the default patch baseline for the relevant operating system.

To reset the Amazon Web Services-predefined patch baseline as the default, specify the full patch baseline Amazon Resource Name (ARN) as the baseline ID value. For example, for CentOS, specify arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed instead of pb-0574b43a65ea646ed.

" }, "RegisterPatchBaselineForPatchGroup":{ "name":"RegisterPatchBaselineForPatchGroup", @@ -1732,7 +1732,7 @@ {"shape":"ServiceSettingNotFound"}, {"shape":"TooManyUpdates"} ], - "documentation":"

ServiceSetting is an account-level setting for an AWS service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an AWS service charges money to the account based on feature or service usage, then the AWS service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. AWS services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the GetServiceSetting API action to view the current value. Use the UpdateServiceSetting API action to change the default setting.

Reset the service setting for the account to the default value as provisioned by the AWS service team.

" + "documentation":"

ServiceSetting is an account-level setting for an Amazon Web Services service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an Amazon Web Services service charges money to the account based on feature or service usage, then the Amazon Web Services service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. Amazon Web Services services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the GetServiceSetting API operation to view the current value. Use the UpdateServiceSetting API operation to change the default setting.

Reset the service setting for the account to the default value as provisioned by the Amazon Web Services service team.

" }, "ResumeSession":{ "name":"ResumeSession", @@ -1746,7 +1746,7 @@ {"shape":"DoesNotExistException"}, {"shape":"InternalServerError"} ], - "documentation":"

Reconnects a session to an instance after it has been disconnected. Connections can be resumed for disconnected sessions, but not terminated sessions.

This command is primarily for use by client machines to automatically reconnect during intermittent network issues. It is not intended for any other use.

" + "documentation":"

Reconnects a session to an instance after it has been disconnected. Connections can be resumed for disconnected sessions, but not terminated sessions.

This command is primarily for use by client machines to automatically reconnect during intermittent network issues. It isn't intended for any other use.

" }, "SendAutomationSignal":{ "name":"SendAutomationSignal", @@ -1799,7 +1799,7 @@ {"shape":"InvalidAssociation"}, {"shape":"AssociationDoesNotExist"} ], - "documentation":"

Use this API action to run an association immediately and only one time. This action can be helpful when troubleshooting associations.

" + "documentation":"

Runs an association immediately and only one time. This operation can be helpful when troubleshooting associations.

" }, "StartAutomationExecution":{ "name":"StartAutomationExecution", @@ -1818,7 +1818,7 @@ {"shape":"InvalidTarget"}, {"shape":"InternalServerError"} ], - "documentation":"

Initiates execution of an Automation document.

" + "documentation":"

Initiates execution of an Automation runbook.

" }, "StartChangeRequestExecution":{ "name":"StartChangeRequestExecution", @@ -1837,7 +1837,7 @@ {"shape":"InternalServerError"}, {"shape":"AutomationDefinitionNotApprovedException"} ], - "documentation":"

Creates a change request for Change Manager. The runbooks (Automation documents) specified in the change request run only after all required approvals for the change request have been received.

" + "documentation":"

Creates a change request for Change Manager. The Automation runbooks specified in the change request run only after all required approvals for the change request have been received.

" }, "StartSession":{ "name":"StartSession", @@ -1852,7 +1852,7 @@ {"shape":"TargetNotConnected"}, {"shape":"InternalServerError"} ], - "documentation":"

Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a URL and token that can be used to open a WebSocket connection for sending input and receiving outputs.

AWS CLI usage: start-session is an interactive command that requires the Session Manager plugin to be installed on the client machine making the call. For information, see Install the Session Manager plugin for the AWS CLI in the AWS Systems Manager User Guide.

AWS Tools for PowerShell usage: Start-SSMSession is not currently supported by AWS Tools for PowerShell on Windows local machines.

" + "documentation":"

Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a URL and token that can be used to open a WebSocket connection for sending input and receiving outputs.

Amazon Web Services CLI usage: start-session is an interactive command that requires the Session Manager plugin to be installed on the client machine making the call. For information, see Install the Session Manager plugin for the Amazon Web Services CLI in the Amazon Web Services Systems Manager User Guide.

Amazon Web Services Tools for PowerShell usage: Start-SSMSession isn't currently supported by Amazon Web Services Tools for PowerShell on Windows local machines.

" }, "StopAutomationExecution":{ "name":"StopAutomationExecution", @@ -1881,7 +1881,7 @@ {"shape":"DoesNotExistException"}, {"shape":"InternalServerError"} ], - "documentation":"

Permanently ends a session and closes the data connection between the Session Manager client and SSM Agent on the instance. A terminated session cannot be resumed.

" + "documentation":"

Permanently ends a session and closes the data connection between the Session Manager client and SSM Agent on the instance. A terminated session can't be resumed.

" }, "UnlabelParameterVersion":{ "name":"UnlabelParameterVersion", @@ -1921,7 +1921,7 @@ {"shape":"InvalidAssociationVersion"}, {"shape":"AssociationVersionLimitExceeded"} ], - "documentation":"

Updates an association. You can update the association name and version, the document version, schedule, parameters, and Amazon S3 output.

In order to call this API action, your IAM user account, group, or role must be configured with permission to call the DescribeAssociation API action. If you don't have permission to call DescribeAssociation, then you receive the following error: An error occurred (AccessDeniedException) when calling the UpdateAssociation operation: User: <user_arn> is not authorized to perform: ssm:DescribeAssociation on resource: <resource_arn>

When you update an association, the association immediately runs against the specified targets.

" + "documentation":"

Updates an association. You can update the association name and version, the document version, schedule, parameters, and Amazon Simple Storage Service (Amazon S3) output.

In order to call this API operation, your Identity and Access Management (IAM) user account, group, or role must be configured with permission to call the DescribeAssociation API operation. If you don't have permission to call DescribeAssociation, then you receive the following error: An error occurred (AccessDeniedException) when calling the UpdateAssociation operation: User: <user_arn> isn't authorized to perform: ssm:DescribeAssociation on resource: <resource_arn>

When you update an association, the association immediately runs against the specified targets.

" }, "UpdateAssociationStatus":{ "name":"UpdateAssociationStatus", @@ -1939,7 +1939,7 @@ {"shape":"StatusUnchanged"}, {"shape":"TooManyUpdates"} ], - "documentation":"

Updates the status of the Systems Manager document associated with the specified instance.

" + "documentation":"

Updates the status of the Amazon Web Services Systems Manager document (SSM document) associated with the specified instance.

UpdateAssociationStatus is primarily used by the Amazon Web Services Systems Manager Agent (SSM Agent) to report status updates about your associations and is only used for associations created with the InstanceId legacy parameter.

" }, "UpdateDocument":{ "name":"UpdateDocument", @@ -1993,7 +1993,7 @@ {"shape":"InvalidDocumentOperation"}, {"shape":"InvalidDocumentVersion"} ], - "documentation":"

Updates information related to approval reviews for a specific version of a document.

" + "documentation":"

Updates information related to approval reviews for a specific version of a change template in Change Manager.

" }, "UpdateMaintenanceWindow":{ "name":"UpdateMaintenanceWindow", @@ -2021,7 +2021,7 @@ {"shape":"DoesNotExistException"}, {"shape":"InternalServerError"} ], - "documentation":"

Modifies the target of an existing maintenance window. You can change the following:

  • Name

  • Description

  • Owner

  • IDs for an ID target

  • Tags for a Tag target

  • From any supported tag type to another. The three supported tag types are ID target, Tag target, and resource group. For more information, see Target.

If a parameter is null, then the corresponding field is not modified.

" + "documentation":"

Modifies the target of an existing maintenance window. You can change the following:

  • Name

  • Description

  • Owner

  • IDs for an ID target

  • Tags for a Tag target

  • From any supported tag type to another. The three supported tag types are ID target, Tag target, and resource group. For more information, see Target.

If a parameter is null, then the corresponding field isn't modified.

" }, "UpdateMaintenanceWindowTask":{ "name":"UpdateMaintenanceWindowTask", @@ -2035,7 +2035,7 @@ {"shape":"DoesNotExistException"}, {"shape":"InternalServerError"} ], - "documentation":"

Modifies a task assigned to a maintenance window. You can't change the task type, but you can change the following values:

  • TaskARN. For example, you can change a RUN_COMMAND task from AWS-RunPowerShellScript to AWS-RunShellScript.

  • ServiceRoleArn

  • TaskInvocationParameters

  • Priority

  • MaxConcurrency

  • MaxErrors

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, AWS Lambda, and AWS Step Functions). For more information about running tasks that do not specify targets, see Registering maintenance window tasks without targets in the AWS Systems Manager User Guide.

If the value for a parameter in UpdateMaintenanceWindowTask is null, then the corresponding field is not modified. If you set Replace to true, then all fields required by the RegisterTaskWithMaintenanceWindow action are required for this request. Optional fields that aren't specified are set to null.

When you update a maintenance window task that has options specified in TaskInvocationParameters, you must provide again all the TaskInvocationParameters values that you want to retain. The values you do not specify again are removed. For example, suppose that when you registered a Run Command task, you specified TaskInvocationParameters values for Comment, NotificationConfig, and OutputS3BucketName. If you update the maintenance window task and specify only a different OutputS3BucketName value, the values for Comment and NotificationConfig are removed.

" + "documentation":"

Modifies a task assigned to a maintenance window. You can't change the task type, but you can change the following values:

  • TaskARN. For example, you can change a RUN_COMMAND task from AWS-RunPowerShellScript to AWS-RunShellScript.

  • ServiceRoleArn

  • TaskInvocationParameters

  • Priority

  • MaxConcurrency

  • MaxErrors

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.

If the value for a parameter in UpdateMaintenanceWindowTask is null, then the corresponding field isn't modified. If you set Replace to true, then all fields required by the RegisterTaskWithMaintenanceWindow operation are required for this request. Optional fields that aren't specified are set to null.

When you update a maintenance window task that has options specified in TaskInvocationParameters, you must provide again all the TaskInvocationParameters values that you want to retain. The values you don't specify again are removed. For example, suppose that when you registered a Run Command task, you specified TaskInvocationParameters values for Comment, NotificationConfig, and OutputS3BucketName. If you update the maintenance window task and specify only a different OutputS3BucketName value, the values for Comment and NotificationConfig are removed.

" }, "UpdateManagedInstanceRole":{ "name":"UpdateManagedInstanceRole", @@ -2049,7 +2049,7 @@ {"shape":"InvalidInstanceId"}, {"shape":"InternalServerError"} ], - "documentation":"

Changes the Amazon Identity and Access Management (IAM) role that is assigned to the on-premises instance or virtual machines (VM). IAM roles are first assigned to these hybrid instances during the activation process. For more information, see CreateActivation.

" + "documentation":"

Changes the Identity and Access Management (IAM) role that is assigned to the on-premises instance or virtual machine (VM). IAM roles are first assigned to these hybrid instances during the activation process. For more information, see CreateActivation.

" }, "UpdateOpsItem":{ "name":"UpdateOpsItem", @@ -2066,7 +2066,7 @@ {"shape":"OpsItemLimitExceededException"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"

Edit or change an OpsItem. You must have permission in AWS Identity and Access Management (IAM) to update an OpsItem. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Edit or change an OpsItem. You must have permission in Identity and Access Management (IAM) to update an OpsItem. For more information, see Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide.

Operations engineers and IT professionals use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their Amazon Web Services resources. For more information, see OpsCenter in the Amazon Web Services Systems Manager User Guide.

" }, "UpdateOpsMetadata":{ "name":"UpdateOpsMetadata", @@ -2083,7 +2083,7 @@ {"shape":"OpsMetadataTooManyUpdatesException"}, {"shape":"InternalServerError"} ], - "documentation":"

Systems Manager calls this API action when you edit OpsMetadata in Application Manager.

" + "documentation":"

Amazon Web Services Systems Manager calls this API operation when you edit OpsMetadata in Application Manager.

" }, "UpdatePatchBaseline":{ "name":"UpdatePatchBaseline", @@ -2097,7 +2097,7 @@ {"shape":"DoesNotExistException"}, {"shape":"InternalServerError"} ], - "documentation":"

Modifies an existing patch baseline. Fields not specified in the request are left unchanged.

For information about valid key and value pairs in PatchFilters for each supported operating system type, see PatchFilter.

" + "documentation":"

Modifies an existing patch baseline. Fields not specified in the request are left unchanged.

For information about valid key-value pairs in PatchFilters for each supported operating system type, see PatchFilter.

" }, "UpdateResourceDataSync":{ "name":"UpdateResourceDataSync", @@ -2113,7 +2113,7 @@ {"shape":"ResourceDataSyncConflictException"}, {"shape":"InternalServerError"} ], - "documentation":"

Update a resource data sync. After you create a resource data sync for a Region, you can't change the account options for that sync. For example, if you create a sync in the us-east-2 (Ohio) Region and you choose the Include only the current account option, you can't edit that sync later and choose the Include all accounts from my AWS Organizations configuration option. Instead, you must delete the first resource data sync, and create a new one.

This API action only supports a resource data sync that was created with a SyncFromSource SyncType.

" + "documentation":"

Update a resource data sync. After you create a resource data sync for a Region, you can't change the account options for that sync. For example, if you create a sync in the us-east-2 (Ohio) Region and you choose the Include only the current account option, you can't edit that sync later and choose the Include all accounts from my Organizations configuration option. Instead, you must delete the first resource data sync, and create a new one.

This API operation only supports a resource data sync that was created with a SyncFromSource SyncType.

" }, "UpdateServiceSetting":{ "name":"UpdateServiceSetting", @@ -2128,7 +2128,7 @@ {"shape":"ServiceSettingNotFound"}, {"shape":"TooManyUpdates"} ], - "documentation":"

ServiceSetting is an account-level setting for an AWS service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an AWS service charges money to the account based on feature or service usage, then the AWS service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. AWS services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the GetServiceSetting API action to view the current value. Or, use the ResetServiceSetting to change the value back to the original value defined by the AWS service team.

Update the service setting for the account.

" + "documentation":"

ServiceSetting is an account-level setting for an Amazon Web Services service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an Amazon Web Services service charges money to the account based on feature or service usage, then the Amazon Web Services service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. Amazon Web Services services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the GetServiceSetting API operation to view the current value. Or, use the ResetServiceSetting to change the value back to the original value defined by the Amazon Web Services service team.

Update the service setting for the account.

" } }, "shapes":{ @@ -2147,19 +2147,19 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID where the current document is shared.

" + "documentation":"

The Amazon Web Services account ID where the current document is shared.

" }, "SharedDocumentVersion":{ "shape":"SharedDocumentVersion", "documentation":"

The version of the current document shared with the account.

" } }, - "documentation":"

Information includes the AWS account ID where the current document is shared and the version shared with that account.

" + "documentation":"

Information includes the Amazon Web Services account ID where the current document is shared and the version shared with that account.

" }, "AccountSharingInfoList":{ "type":"list", "member":{"shape":"AccountSharingInfo"}, - "documentation":"

A list of of AWS accounts where the current document is shared and the version shared with each account.

" + "documentation":"

A list of Amazon Web Services accounts where the current document is shared and the version shared with each account.

" }, "Accounts":{ "type":"list", @@ -2184,7 +2184,7 @@ }, "IamRole":{ "shape":"IamRole", - "documentation":"

The Amazon Identity and Access Management (IAM) role to assign to the managed instance.

" + "documentation":"

The Identity and Access Management (IAM) role to assign to the managed instance.

" }, "RegistrationLimit":{ "shape":"RegistrationLimit", @@ -2211,7 +2211,7 @@ "documentation":"

Tags assigned to the activation.

" } }, - "documentation":"

An activation registers one or more on-premises servers or virtual machines (VMs) with AWS so that you can configure those servers or VMs using Run Command. A server or VM that has been registered with AWS is called a managed instance.

" + "documentation":"

An activation registers one or more on-premises servers or virtual machines (VMs) with Amazon Web Services so that you can configure those servers or VMs using Run Command. A server or VM that has been registered with Amazon Web Services Systems Manager is called a managed instance.

" }, "ActivationCode":{ "type":"string", @@ -2241,15 +2241,15 @@ "members":{ "ResourceType":{ "shape":"ResourceTypeForTagging", - "documentation":"

Specifies the type of resource you are tagging.

The ManagedInstance type for this API action is for on-premises managed instances. You must specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" + "documentation":"

Specifies the type of resource you are tagging.

The ManagedInstance type for this API operation is for on-premises managed instances. You must specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" }, "ResourceId":{ "shape":"ResourceId", - "documentation":"

The resource ID you want to tag.

Use the ID of the resource. Here are some examples:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

OpsMetadata object: ResourceID for tagging is created from the Amazon Resource Name (ARN) for the object. Specifically, ResourceID is created from the strings that come after the word opsmetadata in the ARN. For example, an OpsMetadata object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager.

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API action is only for on-premises managed instances. You must specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" + "documentation":"

The resource ID you want to tag.

Use the ID of the resource. Here are some examples:

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

OpsMetadata object: ResourceID for tagging is created from the Amazon Resource Name (ARN) for the object. Specifically, ResourceID is created from the strings that come after the word opsmetadata in the ARN. For example, an OpsMetadata object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager.

For the Document and Parameter values, use the name of the resource.

ManagedInstance: mi-012345abcde

The ManagedInstance type for this API operation is only for on-premises managed instances. You must specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" }, "Tags":{ "shape":"TagList", - "documentation":"

One or more tags. The value parameter is required.

Do not enter personally identifiable information in this field.

" + "documentation":"

One or more tags. The value parameter is required.

Don't enter personally identifiable information in this field.

" } } }, @@ -2301,11 +2301,11 @@ }, "ResourceType":{ "shape":"OpsItemRelatedItemAssociationResourceType", - "documentation":"

The type of resource that you want to associate with an OpsItem. OpsCenter supports the following types:

AWS::SSMIncidents::IncidentRecord: an Incident Manager incident. Incident Manager is a capability of AWS Systems Manager.

AWS::SSM::Document: a Systems Manager (SSM) document.

" + "documentation":"

The type of resource that you want to associate with an OpsItem. OpsCenter supports the following types:

AWS::SSMIncidents::IncidentRecord: an Incident Manager incident. Incident Manager is a capability of Amazon Web Services Systems Manager.

AWS::SSM::Document: a Systems Manager (SSM) document.

" }, "ResourceUri":{ "shape":"OpsItemRelatedItemAssociationResourceUri", - "documentation":"

The Amazon Resource Name (ARN) of the AWS resource that you want to associate with the OpsItem.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services resource that you want to associate with the OpsItem.

" } } }, @@ -2330,11 +2330,11 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The ID of the instance.

" + "documentation":"

The instance ID.

" }, "AssociationId":{ "shape":"AssociationId", @@ -2369,7 +2369,7 @@ "documentation":"

The association name.

" } }, - "documentation":"

Describes an association of a Systems Manager document and an instance.

" + "documentation":"

Describes an association of an Amazon Web Services Systems Manager document (SSM document) and an instance.

" }, "AssociationAlreadyExists":{ "type":"structure", @@ -2393,11 +2393,11 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The ID of the instance.

" + "documentation":"

The instance ID.

" }, "AssociationVersion":{ "shape":"AssociationVersion", @@ -2425,7 +2425,7 @@ }, "AutomationTargetParameterName":{ "shape":"AutomationTargetParameterName", - "documentation":"

Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.

" + "documentation":"

Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.

" }, "Parameters":{ "shape":"Parameters", @@ -2461,11 +2461,11 @@ }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" + "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" + "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" }, "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", @@ -2473,19 +2473,19 @@ }, "SyncCompliance":{ "shape":"AssociationSyncCompliance", - "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" }, "ApplyOnlyAtCronInterval":{ "shape":"ApplyOnlyAtCronInterval", - "documentation":"

By default, when you create a new associations, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it. This parameter is not supported for rate expressions.

" + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it. This parameter isn't supported for rate expressions.

" }, "CalendarNames":{ "shape":"CalendarNameOrARNList", - "documentation":"

The names or Amazon Resource Names (ARNs) of the Systems Manager Change Calendar type documents your associations are gated under. The associations only run when that Change Calendar is open. For more information, see AWS Systems Manager Change Calendar.

" + "documentation":"

The names or Amazon Resource Names (ARNs) of the Change Calendar type documents your associations are gated under. The associations only run when that change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar.

" }, "TargetLocations":{ "shape":"TargetLocations", - "documentation":"

The combination of AWS Regions and AWS accounts where you want to run the association.

" + "documentation":"

The combination of Amazon Web Services Regions and Amazon Web Services accounts where you want to run the association.

" } }, "documentation":"

Describes the parameters for a document.

" @@ -2499,7 +2499,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified association does not exist.

", + "documentation":"

The specified association doesn't exist.

", "exception":true }, "AssociationExecution":{ @@ -2545,7 +2545,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified execution ID does not exist. Verify the ID number and try again.

", + "documentation":"

The specified execution ID doesn't exist. Verify the ID number and try again.

", "exception":true }, "AssociationExecutionFilter":{ @@ -2850,7 +2850,7 @@ }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The version of a Systems Manager document used when the association version was created.

" + "documentation":"

The version of an Amazon Web Services Systems Manager document (SSM document) used when the association version was created.

" }, "Parameters":{ "shape":"Parameters", @@ -2874,11 +2874,11 @@ }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" + "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxErrors to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" + "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" }, "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", @@ -2886,19 +2886,19 @@ }, "SyncCompliance":{ "shape":"AssociationSyncCompliance", - "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" }, "ApplyOnlyAtCronInterval":{ "shape":"ApplyOnlyAtCronInterval", - "documentation":"

By default, when you create a new associations, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it. This parameter is not supported for rate expressions.

" + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it. This parameter isn't supported for rate expressions.

" }, "CalendarNames":{ "shape":"CalendarNameOrARNList", - "documentation":"

The names or Amazon Resource Names (ARNs) of the Systems Manager Change Calendar type documents your associations are gated under. The associations for this version only run when that Change Calendar is open. For more information, see AWS Systems Manager Change Calendar.

" + "documentation":"

The names or Amazon Resource Names (ARNs) of the Change Calendar type documents your associations are gated under. The associations for this version only run when that Change Calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar.

" }, "TargetLocations":{ "shape":"TargetLocations", - "documentation":"

The combination of AWS Regions and AWS accounts where you wanted to run the association when this association version was created.

" + "documentation":"

The combination of Amazon Web Services Regions and Amazon Web Services accounts where you wanted to run the association when this association version was created.

" } }, "documentation":"

Information about the association version.

" @@ -3047,7 +3047,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

An Automation document with the specified name could not be found.

", + "documentation":"

An Automation runbook with the specified name couldn't be found.

", "exception":true }, "AutomationDefinitionVersionNotFoundException":{ @@ -3055,7 +3055,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

An Automation document with the specified name and version could not be found.

", + "documentation":"

An Automation runbook with the specified name and version couldn't be found.

", "exception":true }, "AutomationExecution":{ @@ -3067,7 +3067,7 @@ }, "DocumentName":{ "shape":"DocumentName", - "documentation":"

The name of the Automation document used during the execution.

" + "documentation":"

The name of the Automation runbook used during the execution.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -3087,19 +3087,19 @@ }, "StepExecutions":{ "shape":"StepExecutionList", - "documentation":"

A list of details about the current state of all steps that comprise an execution. An Automation document contains a list of steps that are run in order.

" + "documentation":"

A list of details about the current state of all steps that comprise an execution. An Automation runbook contains a list of steps that are run in order.

" }, "StepExecutionsTruncated":{ "shape":"Boolean", - "documentation":"

A boolean value that indicates if the response contains the full list of the Automation step executions. If true, use the DescribeAutomationStepExecutions API action to get the full list of step executions.

" + "documentation":"

A boolean value that indicates if the response contains the full list of the Automation step executions. If true, use the DescribeAutomationStepExecutions API operation to get the full list of step executions.

" }, "Parameters":{ "shape":"AutomationParameterMap", - "documentation":"

The key-value map of execution parameters, which were supplied when calling StartAutomationExecution.

" + "documentation":"

The key-value map of execution parameters, which were supplied when calling StartAutomationExecution.

" }, "Outputs":{ "shape":"AutomationParameterMap", - "documentation":"

The list of execution outputs as defined in the automation document.

" + "documentation":"

The list of execution outputs as defined in the Automation runbook.

" }, "FailureMessage":{ "shape":"String", @@ -3143,7 +3143,7 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The MaxConcurrency value specified by the user when the execution started.

" + "documentation":"

The MaxConcurrency value specified by the user when the execution started.

" }, "MaxErrors":{ "shape":"MaxErrors", @@ -3155,12 +3155,12 @@ }, "TargetLocations":{ "shape":"TargetLocations", - "documentation":"

The combination of AWS Regions and/or AWS accounts where you want to run the Automation.

", + "documentation":"

The combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the Automation.

", "box":true }, "ProgressCounters":{ "shape":"ProgressCounters", - "documentation":"

An aggregate of step execution statuses displayed in the AWS Console for a multi-Region and multi-account Automation execution.

" + "documentation":"

An aggregate of step execution statuses displayed in the Amazon Web Services Systems Manager console for a multi-Region and multi-account Automation execution.

" }, "AutomationSubtype":{ "shape":"AutomationSubtype", @@ -3172,7 +3172,7 @@ }, "Runbooks":{ "shape":"Runbooks", - "documentation":"

Information about the Automation runbooks (Automation documents) that are run as part of a runbook workflow.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" + "documentation":"

Information about the Automation runbooks that are run as part of a runbook workflow.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" }, "OpsItemId":{ "shape":"String", @@ -3263,7 +3263,7 @@ }, "DocumentName":{ "shape":"DocumentName", - "documentation":"

The name of the Automation document used during execution.

" + "documentation":"

The name of the Automation runbook used during execution.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -3279,11 +3279,11 @@ }, "ExecutionEndTime":{ "shape":"DateTime", - "documentation":"

The time the execution finished. This is not populated if the execution is still in progress.

" + "documentation":"

The time the execution finished. This isn't populated if the execution is still in progress.

" }, "ExecutedBy":{ "shape":"String", - "documentation":"

The IAM role ARN of the user who ran the Automation.

" + "documentation":"

The IAM role ARN of the user who ran the automation.

" }, "LogFile":{ "shape":"String", @@ -3291,7 +3291,7 @@ }, "Outputs":{ "shape":"AutomationParameterMap", - "documentation":"

The list of execution outputs as defined in the Automation document.

" + "documentation":"

The list of execution outputs as defined in the Automation runbook.

" }, "Mode":{ "shape":"ExecutionMode", @@ -3299,7 +3299,7 @@ }, "ParentAutomationExecutionId":{ "shape":"AutomationExecutionId", - "documentation":"

The ExecutionId of the parent Automation.

" + "documentation":"

The execution ID of the parent automation.

" }, "CurrentStepName":{ "shape":"String", @@ -3311,15 +3311,15 @@ }, "FailureMessage":{ "shape":"String", - "documentation":"

The list of execution outputs as defined in the Automation document.

" + "documentation":"

The list of execution outputs as defined in the Automation runbook.

" }, "TargetParameterName":{ "shape":"AutomationParameterKey", - "documentation":"

The list of execution outputs as defined in the Automation document.

" + "documentation":"

The list of execution outputs as defined in the Automation runbook.

" }, "Targets":{ "shape":"Targets", - "documentation":"

The targets defined by the user when starting the Automation.

" + "documentation":"

The targets defined by the user when starting the automation.

" }, "TargetMaps":{ "shape":"TargetMaps", @@ -3331,19 +3331,19 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The MaxConcurrency value specified by the user when starting the Automation.

" + "documentation":"

The MaxConcurrency value specified by the user when starting the automation.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The MaxErrors value specified by the user when starting the Automation.

" + "documentation":"

The MaxErrors value specified by the user when starting the automation.

" }, "Target":{ "shape":"String", - "documentation":"

The list of execution outputs as defined in the Automation document.

" + "documentation":"

The list of execution outputs as defined in the Automation runbook.

" }, "AutomationType":{ "shape":"AutomationType", - "documentation":"

Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that runs in multiple AWS Regions and accounts. For more information, see Running Automation workflows in multiple AWS Regions and accounts in the AWS Systems Manager User Guide.

" + "documentation":"

Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that runs in multiple Amazon Web Services Regions and Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide.

" }, "AutomationSubtype":{ "shape":"AutomationSubtype", @@ -3355,7 +3355,7 @@ }, "Runbooks":{ "shape":"Runbooks", - "documentation":"

Information about the Automation runbooks (Automation documents) that are run during a runbook workflow in Change Manager.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" + "documentation":"

Information about the Automation runbooks that are run during a runbook workflow in Change Manager.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" }, "OpsItemId":{ "shape":"String", @@ -3482,7 +3482,7 @@ "ApprovalRules":{"shape":"PatchRuleGroup"}, "ApprovedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", @@ -3490,15 +3490,15 @@ }, "RejectedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" }, "RejectedPatchesAction":{ "shape":"PatchAction", - "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list. A patch can be allowed only if it is a dependency of another package, or blocked entirely along with packages that include it as a dependency.

" + "documentation":"

The action for Patch Manager to take on patches included in the RejectedPatches list. A patch can be allowed only if it is a dependency of another package, or blocked entirely along with packages that include it as a dependency.

" }, "ApprovedPatchesEnableNonSecurity":{ "shape":"Boolean", - "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

" + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is false. Applies to Linux instances only.

" }, "Sources":{ "shape":"PatchSourceList", @@ -3586,14 +3586,14 @@ "members":{ "CloudWatchLogGroupName":{ "shape":"CloudWatchLogGroupName", - "documentation":"

The name of the CloudWatch log group where you want to send command output. If you don't specify a group name, Systems Manager automatically creates a log group for you. The log group uses the following naming format: aws/ssm/SystemsManagerDocumentName.

" + "documentation":"

The name of the CloudWatch Logs log group where you want to send command output. If you don't specify a group name, Amazon Web Services Systems Manager automatically creates a log group for you. The log group uses the following naming format:

aws/ssm/SystemsManagerDocumentName

" }, "CloudWatchOutputEnabled":{ "shape":"CloudWatchOutputEnabled", "documentation":"

Enables Systems Manager to send command output to CloudWatch Logs.

" } }, - "documentation":"

Configuration options for sending command output to CloudWatch Logs.

" + "documentation":"

Configuration options for sending command output to Amazon CloudWatch Logs.

" }, "CloudWatchOutputEnabled":{"type":"boolean"}, "Command":{ @@ -3609,7 +3609,7 @@ }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The SSM document version.

" + "documentation":"

The Systems Manager document (SSM document) version.

" }, "Comment":{ "shape":"Comment", @@ -3617,7 +3617,7 @@ }, "ExpiresAfter":{ "shape":"DateTime", - "documentation":"

If this time is reached and the command has not already started running, it will not run. Calculated based on the ExpiresAfter user input provided as part of the SendCommand API.

" + "documentation":"

If this time is reached and the command hasn't already started running, it won't run. Calculated based on the ExpiresAfter user input provided as part of the SendCommand API operation.

" }, "Parameters":{ "shape":"Parameters", @@ -3641,11 +3641,11 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to any instances.

  • In Progress: The command has been sent to at least one instance but has not reached a final state on all instances.

  • Success: The command successfully ran on all invocations. This is a terminal state.

  • Delivery Timed Out: The value of MaxErrors or more command invocations shows a status of Delivery Timed Out. This is a terminal state.

  • Execution Timed Out: The value of MaxErrors or more command invocations shows a status of Execution Timed Out. This is a terminal state.

  • Failed: The value of MaxErrors or more command invocations shows a status of Failed. This is a terminal state.

  • Incomplete: The command was attempted on all instances and one or more invocations does not have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Rate Exceeded: The number of instances targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before running it on any instance. This is a terminal state.

" + "documentation":"

A detailed status of the command execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command hasn't been sent to any instances.

  • In Progress: The command has been sent to at least one instance but hasn't reached a final state on all instances.

  • Success: The command successfully ran on all invocations. This is a terminal state.

  • Delivery Timed Out: The value of MaxErrors or more command invocations shows a status of Delivery Timed Out. This is a terminal state.

  • Execution Timed Out: The value of MaxErrors or more command invocations shows a status of Execution Timed Out. This is a terminal state.

  • Failed: The value of MaxErrors or more command invocations shows a status of Failed. This is a terminal state.

  • Incomplete: The command was attempted on all instances and one or more invocations doesn't have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Rate Exceeded: The number of instances targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before running it on any instance. This is a terminal state.

" }, "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Region of the S3 bucket.

" + "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon Web Services Region of the S3 bucket.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", @@ -3657,11 +3657,11 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of instances that are allowed to run the command at the same time. You can specify a number of instances, such as 10, or a percentage of instances, such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Running commands using Systems Manager Run Command in the AWS Systems Manager User Guide.

" + "documentation":"

The maximum number of instances that are allowed to run the command at the same time. You can specify a number of instances, such as 10, or a percentage of instances, such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Running commands using Systems Manager Run Command in the Amazon Web Services Systems Manager User Guide.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The maximum number of errors allowed before the system stops sending the command to additional targets. You can specify a number of errors, such as 10, or a percentage or errors, such as 10%. The default value is 0. For more information about how to use MaxErrors, see Running commands using Systems Manager Run Command in the AWS Systems Manager User Guide.

" + "documentation":"

The maximum number of errors allowed before the system stops sending the command to additional targets. You can specify a number of errors, such as 10, or a percentage of errors, such as 10%. The default value is 0. For more information about how to use MaxErrors, see Running commands using Systems Manager Run Command in the Amazon Web Services Systems Manager User Guide.

" }, "TargetCount":{ "shape":"TargetCount", @@ -3681,7 +3681,7 @@ }, "ServiceRole":{ "shape":"ServiceRole", - "documentation":"

The IAM service role that Run Command uses to act on your behalf when sending notifications about command status changes.

" + "documentation":"

The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes.

" }, "NotificationConfig":{ "shape":"NotificationConfig", @@ -3689,7 +3689,7 @@ }, "CloudWatchOutputConfig":{ "shape":"CloudWatchOutputConfig", - "documentation":"

CloudWatch Logs information where you want Systems Manager to send the command output.

" + "documentation":"

Amazon CloudWatch Logs information where you want Amazon Web Services Systems Manager to send the command output.

" }, "TimeoutSeconds":{ "shape":"TimeoutSeconds", @@ -3711,7 +3711,7 @@ }, "value":{ "shape":"CommandFilterValue", - "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-07-07T00:00:00Z to see a list of command executions occurring July 7, 2018, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-07-07T00:00:00Z to see a list of command executions from before July 7, 2018.

  • Status: Specify a valid command status to see a list of all command executions with that status. Status values you can specify include:

    • Pending

    • InProgress

    • Success

    • Cancelled

    • Failed

    • TimedOut

    • Cancelling

  • DocumentName: Specify name of the SSM document for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on instances.

  • ExecutionStage: Specify one of the following values:

    • Executing: Returns a list of command executions that are currently still running.

    • Complete: Returns a list of command executions that have already completed.

" + "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions occurring July 7, 2021, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions from before July 7, 2021.

  • Status: Specify a valid command status to see a list of all command executions with that status. Status values you can specify include:

    • Pending

    • InProgress

    • Success

    • Cancelled

    • Failed

    • TimedOut

    • Cancelling

  • DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on instances.

  • ExecutionStage: Specify one of the following values:

    • Executing: Returns a list of command executions that are currently still running.

    • Complete: Returns a list of command executions that have already completed.

" } }, "documentation":"

Describes a command filter.

An instance ID can't be specified when a command status is Pending because the command hasn't run on the instance yet.

" @@ -3755,7 +3755,7 @@ }, "InstanceName":{ "shape":"InstanceTagName", - "documentation":"

The name of the invocation target. For EC2 instances this is the value for the aws:Name tag. For on-premises instances, this is the name of the instance.

" + "documentation":"

The name of the invocation target. For EC2 instances this is the value for the aws:Name tag. For on-premises instances, this is the name of the instance.

" }, "Comment":{ "shape":"Comment", @@ -3767,7 +3767,7 @@ }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The SSM document version.

" + "documentation":"

The Systems Manager document (SSM document) version.

" }, "RequestedDateTime":{ "shape":"DateTime", @@ -3779,7 +3779,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution for each invocation (each instance targeted by the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the command execution for each invocation (each instance targeted by the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command hasn't been sent to the instance.

  • In Progress: The command has been sent to the instance but hasn't reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command wasn't delivered to the instance before the delivery timeout expired. Delivery timeouts don't count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution wasn't complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command wasn't successful on the instance. For a plugin, this indicates that the result code wasn't zero. For a command invocation, this indicates that the result code for one or more plugins wasn't zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "TraceOutput":{ "shape":"InvocationTraceOutput", @@ -3787,11 +3787,11 @@ }, "StandardOutputUrl":{ "shape":"Url", - "documentation":"

The URL to the plugin's StdOut file in Amazon S3, if the S3 bucket was defined for the parent command. For an invocation, StandardOutputUrl is populated if there is just one plugin defined for the command, and the S3 bucket was defined for the command.

" + "documentation":"

The URL to the plugin's StdOut file in Amazon Simple Storage Service (Amazon S3), if the S3 bucket was defined for the parent command. For an invocation, StandardOutputUrl is populated if there is just one plugin defined for the command, and the S3 bucket was defined for the command.

" }, "StandardErrorUrl":{ "shape":"Url", - "documentation":"

The URL to the plugin's StdErr file in Amazon S3, if the S3 bucket was defined for the parent command. For an invocation, StandardErrorUrl is populated if there is just one plugin defined for the command, and the S3 bucket was defined for the command.

" + "documentation":"

The URL to the plugin's StdErr file in Amazon Simple Storage Service (Amazon S3), if the S3 bucket was defined for the parent command. For an invocation, StandardErrorUrl is populated if there is just one plugin defined for the command, and the S3 bucket was defined for the command.

" }, "CommandPlugins":{ "shape":"CommandPluginList", @@ -3799,7 +3799,7 @@ }, "ServiceRole":{ "shape":"ServiceRole", - "documentation":"

The IAM service role that Run Command uses to act on your behalf when sending notifications about command status changes on a per instance basis.

" + "documentation":"

The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes on a per instance basis.

" }, "NotificationConfig":{ "shape":"NotificationConfig", @@ -3807,7 +3807,7 @@ }, "CloudWatchOutputConfig":{ "shape":"CloudWatchOutputConfig", - "documentation":"

CloudWatch Logs information where you want Systems Manager to send the command output.

" + "documentation":"

Amazon CloudWatch Logs information where you want Amazon Web Services Systems Manager to send the command output.

" } }, "documentation":"

An invocation is a copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user runs SendCommand against three instances, then a command invocation is created for each requested instance ID. A command invocation returns status and detail information about a command you ran.

" @@ -3843,7 +3843,7 @@ "members":{ "Name":{ "shape":"CommandPluginName", - "documentation":"

The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin, aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch, aws:runShellScript, or aws:updateSSMAgent.

" + "documentation":"

The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin, aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch, aws:runShellScript, or aws:updateSSMAgent.

" }, "Status":{ "shape":"CommandPluginStatus", @@ -3851,7 +3851,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the plugin execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the plugin execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command hasn't been sent to the instance.

  • In Progress: The command has been sent to the instance but hasn't reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command wasn't delivered to the instance before the delivery timeout expired. Delivery timeouts don't count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution wasn't complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command wasn't successful on the instance. For a plugin, this indicates that the result code wasn't zero. For a command invocation, this indicates that the result code for one or more plugins wasn't zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "ResponseCode":{ "shape":"ResponseCode", @@ -3871,23 +3871,23 @@ }, "StandardOutputUrl":{ "shape":"Url", - "documentation":"

The URL for the complete text written by the plugin to stdout in Amazon S3. If the S3 bucket for the command was not specified, then this string is empty.

" + "documentation":"

The URL for the complete text written by the plugin to stdout in Amazon S3. If the S3 bucket for the command wasn't specified, then this string is empty.

" }, "StandardErrorUrl":{ "shape":"Url", - "documentation":"

The URL for the complete text written by the plugin to stderr. If execution is not yet complete, then this string is empty.

" + "documentation":"

The URL for the complete text written by the plugin to stderr. If execution isn't yet complete, then this string is empty.

" }, "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the S3 bucket region.

" + "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Amazon Web Services Systems Manager automatically determines the S3 bucket region.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", - "documentation":"

The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:

doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript

doc-example-bucket is the name of the S3 bucket;

ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

i-02573cafcfEXAMPLE is the instance ID;

awsrunShellScript is the name of the plugin.

" + "documentation":"

The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:

doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript

doc-example-bucket is the name of the S3 bucket;

ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

i-02573cafcfEXAMPLE is the instance ID;

awsrunShellScript is the name of the plugin.

" }, "OutputS3KeyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:

doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript

doc-example-bucket is the name of the S3 bucket;

ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

i-02573cafcfEXAMPLE is the instance ID;

awsrunShellScript is the name of the plugin.

" + "documentation":"

The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:

doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript

doc-example-bucket is the name of the S3 bucket;

ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

i-02573cafcfEXAMPLE is the instance ID;

awsrunShellScript is the name of the plugin.

" } }, "documentation":"

Describes plugin details.

" @@ -4214,19 +4214,19 @@ "members":{ "Description":{ "shape":"ActivationDescription", - "documentation":"

A user-defined description of the resource that you want to register with Systems Manager.

Do not enter personally identifiable information in this field.

" + "documentation":"

A user-defined description of the resource that you want to register with Systems Manager.

Don't enter personally identifiable information in this field.

" }, "DefaultInstanceName":{ "shape":"DefaultInstanceName", - "documentation":"

The name of the registered, managed instance as it will appear in the Systems Manager console or when you use the AWS command line tools to list Systems Manager resources.

Do not enter personally identifiable information in this field.

" + "documentation":"

The name of the registered, managed instance as it will appear in the Amazon Web Services Systems Manager console or when you use the Amazon Web Services command line tools to list Systems Manager resources.

Don't enter personally identifiable information in this field.

" }, "IamRole":{ "shape":"IamRole", - "documentation":"

The Amazon Identity and Access Management (IAM) role that you want to assign to the managed instance. This IAM role must provide AssumeRole permissions for the Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid environment in the AWS Systems Manager User Guide.

" + "documentation":"

The name of the Identity and Access Management (IAM) role that you want to assign to the managed instance. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid environment in the Amazon Web Services Systems Manager User Guide.

" }, "RegistrationLimit":{ "shape":"RegistrationLimit", - "documentation":"

Specify the maximum number of managed instances you want to register. The default value is 1 instance.

", + "documentation":"

Specify the maximum number of managed instances you want to register. The default value is 1.

", "box":true }, "ExpirationDate":{ @@ -4235,7 +4235,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an activation to identify which servers or virtual machines (VMs) in your on-premises environment you intend to activate. In this case, you could specify the following key name/value pairs:

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

When you install SSM Agent on your on-premises servers and VMs, you specify an activation ID and code. When you specify the activation ID and code, tags assigned to the activation are automatically applied to the on-premises servers or VMs.

You can't add tags to or delete tags from an existing activation. You can tag your on-premises servers and VMs after they connect to Systems Manager for the first time and are assigned a managed instance ID. This means they are listed in the AWS Systems Manager console with an ID that is prefixed with \"mi-\". For information about how to add tags to your managed instances, see AddTagsToResource. For information about how to remove tags from your managed instances, see RemoveTagsFromResource.

" + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an activation to identify which servers or virtual machines (VMs) in your on-premises environment you intend to activate. In this case, you could specify the following key-value pairs:

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

When you install SSM Agent on your on-premises servers and VMs, you specify an activation ID and code. When you specify the activation ID and code, tags assigned to the activation are automatically applied to the on-premises servers or VMs.

You can't add tags to or delete tags from an existing activation. You can tag your on-premises servers and VMs after they connect to Systems Manager for the first time and are assigned a managed instance ID. This means they are listed in the Amazon Web Services Systems Manager console with an ID that is prefixed with \"mi-\". For information about how to add tags to your managed instances, see AddTagsToResource. For information about how to remove tags from your managed instances, see RemoveTagsFromResource.

" } } }, @@ -4273,11 +4273,11 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the SSM document that contains the configuration information for the instance. You can specify Command or Automation documents.

You can specify AWS-predefined documents, documents you created, or a document that is shared with you from another account.

For SSM documents that are shared with you from other AWS accounts, you must specify the complete SSM document ARN, in the following format:

arn:aws:ssm:region:account-id:document/document-name

For example:

arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document

For AWS-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.

" + "documentation":"

The name of the SSM document that contains the configuration information for the instance. You can specify Command or Automation runbooks.

You can specify Amazon Web Services-predefined documents, documents you created, or a document that is shared with you from another account.

For SSM documents that are shared with you from other Amazon Web Services accounts, you must specify the complete SSM document ARN, in the following format:

arn:aws:ssm:region:account-id:document/document-name

For example:

arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document

For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The ID of the instance.

" + "documentation":"

The instance ID.

InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. Requests that include the parameter InstanceId with Systems Manager documents (SSM documents) that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you can't use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter.

" }, "Parameters":{ "shape":"Parameters", @@ -4285,7 +4285,7 @@ }, "AutomationTargetParameterName":{ "shape":"AutomationTargetParameterName", - "documentation":"

Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.

" + "documentation":"

Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -4309,11 +4309,11 @@ }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" + "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" + "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" }, "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", @@ -4321,22 +4321,22 @@ }, "SyncCompliance":{ "shape":"AssociationSyncCompliance", - "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" }, "ApplyOnlyAtCronInterval":{ "shape":"ApplyOnlyAtCronInterval", - "documentation":"

By default, when you create a new associations, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it. This parameter is not supported for rate expressions.

" + "documentation":"

By default, when you create a new associations, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it. This parameter isn't supported for rate expressions.

" }, "CalendarNames":{ "shape":"CalendarNameOrARNList", - "documentation":"

The names or Amazon Resource Names (ARNs) of the Systems Manager Change Calendar type documents your associations are gated under. The associations only run when that Change Calendar is open. For more information, see AWS Systems Manager Change Calendar.

" + "documentation":"

The names or Amazon Resource Names (ARNs) of the Change Calendar type documents your associations are gated under. The associations only run when that Change Calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar.

" }, "TargetLocations":{ "shape":"TargetLocations", "documentation":"

Use this action to create an association in multiple Regions and multiple accounts.

" } }, - "documentation":"

Describes the association of a Systems Manager SSM document and an instance.

" + "documentation":"

Describes the association of an Amazon Web Services Systems Manager document (SSM document) and an instance.

" }, "CreateAssociationBatchResult":{ "type":"structure", @@ -4357,7 +4357,7 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the SSM document that contains the configuration information for the instance. You can specify Command or Automation documents.

You can specify AWS-predefined documents, documents you created, or a document that is shared with you from another account.

For SSM documents that are shared with you from other AWS accounts, you must specify the complete SSM document ARN, in the following format:

arn:partition:ssm:region:account-id:document/document-name

For example:

arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document

For AWS-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.

" + "documentation":"

The name of the SSM Command document or Automation runbook that contains the configuration information for the instance.

You can specify Amazon Web Services-predefined documents, documents you created, or a document that is shared with you from another account.

For Systems Manager documents (SSM documents) that are shared with you from other Amazon Web Services accounts, you must specify the complete SSM document ARN, in the following format:

arn:partition:ssm:region:account-id:document/document-name

For example:

arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document

For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -4365,7 +4365,7 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The instance ID.

InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. Requests that include the parameter InstanceID with SSM documents that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you cannot use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter.

" + "documentation":"

The instance ID.

InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. Requests that include the parameter InstanceId with Systems Manager documents (SSM documents) that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you can't use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter.

" }, "Parameters":{ "shape":"Parameters", @@ -4373,7 +4373,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets for the association. You can target instances by using tags, AWS Resource Groups, all instances in an AWS account, or individual instance IDs. For more information about choosing targets for an association, see Using targets and rate controls with State Manager associations in the AWS Systems Manager User Guide.

" + "documentation":"

The targets for the association. You can target instances by using tags, Amazon Web Services resource groups, all instances in an Amazon Web Services account, or individual instance IDs. For more information about choosing targets for an association, see Using targets and rate controls with State Manager associations in the Amazon Web Services Systems Manager User Guide.

" }, "ScheduleExpression":{ "shape":"ScheduleExpression", @@ -4381,7 +4381,7 @@ }, "OutputLocation":{ "shape":"InstanceAssociationOutputLocation", - "documentation":"

An S3 bucket where you want to store the output details of the request.

" + "documentation":"

An Amazon Simple Storage Service (Amazon S3) bucket where you want to store the output details of the request.

" }, "AssociationName":{ "shape":"AssociationName", @@ -4389,15 +4389,15 @@ }, "AutomationTargetParameterName":{ "shape":"AutomationTargetParameterName", - "documentation":"

Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.

" + "documentation":"

Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" + "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" + "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" }, "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", @@ -4405,19 +4405,19 @@ }, "SyncCompliance":{ "shape":"AssociationSyncCompliance", - "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" }, "ApplyOnlyAtCronInterval":{ "shape":"ApplyOnlyAtCronInterval", - "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it. This parameter is not supported for rate expressions.

" + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it. This parameter isn't supported for rate expressions.

" }, "CalendarNames":{ "shape":"CalendarNameOrARNList", - "documentation":"

The names or Amazon Resource Names (ARNs) of the Systems Manager Change Calendar type documents you want to gate your associations under. The associations only run when that Change Calendar is open. For more information, see AWS Systems Manager Change Calendar.

" + "documentation":"

The names or Amazon Resource Names (ARNs) of the Change Calendar type documents you want to gate your associations under. The associations only run when that change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar.

" }, "TargetLocations":{ "shape":"TargetLocations", - "documentation":"

A location is a combination of AWS Regions and AWS accounts where you want to run the association. Use this action to create an association in multiple Regions and multiple accounts.

" + "documentation":"

A location is a combination of Amazon Web Services Regions and Amazon Web Services accounts where you want to run the association. Use this action to create an association in multiple Regions and multiple accounts.

" } } }, @@ -4439,27 +4439,27 @@ "members":{ "Content":{ "shape":"DocumentContent", - "documentation":"

The content for the new SSM document in JSON or YAML format. We recommend storing the contents for your new document in an external JSON or YAML file and referencing the file in a command.

For examples, see the following topics in the AWS Systems Manager User Guide.

" + "documentation":"

The content for the new SSM document in JSON or YAML format. We recommend storing the contents for your new document in an external JSON or YAML file and referencing the file in a command.

For examples, see the following topics in the Amazon Web Services Systems Manager User Guide.

" }, "Requires":{ "shape":"DocumentRequiresList", - "documentation":"

A list of SSM documents required by a document. This parameter is used exclusively by AWS AppConfig. When a user creates an AppConfig configuration in an SSM document, the user must also specify a required document for validation purposes. In this case, an ApplicationConfiguration document requires an ApplicationConfigurationSchema document for validation purposes. For more information, see AWS AppConfig in the AWS Systems Manager User Guide.

" + "documentation":"

A list of SSM documents required by a document. This parameter is used exclusively by AppConfig. When a user creates an AppConfig configuration in an SSM document, the user must also specify a required document for validation purposes. In this case, an ApplicationConfiguration document requires an ApplicationConfigurationSchema document for validation purposes. For more information, see What is AppConfig? in the AppConfig User Guide.

" }, "Attachments":{ "shape":"AttachmentsSourceList", - "documentation":"

A list of key and value pairs that describe attachments to a version of a document.

" + "documentation":"

A list of key-value pairs that describe attachments to a version of a document.

" }, "Name":{ "shape":"DocumentName", - "documentation":"

A name for the Systems Manager document.

You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:

  • aws-

  • amazon

  • amzn

" + "documentation":"

A name for the SSM document.

You can't use the following strings as document name prefixes. These are reserved by Amazon Web Services for use as document name prefixes:

  • aws-

  • amazon

  • amzn

" }, "DisplayName":{ "shape":"DocumentDisplayName", - "documentation":"

An optional field where you can specify a friendly name for the Systems Manager document. This value can differ for each version of the document. You can update this value at a later time using the UpdateDocument action.

" + "documentation":"

An optional field where you can specify a friendly name for the SSM document. This value can differ for each version of the document. You can update this value at a later time using the UpdateDocument operation.

" }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact you are creating with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and cannot be changed.

" + "documentation":"

An optional field specifying the version of the artifact you are creating with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" }, "DocumentType":{ "shape":"DocumentType", @@ -4471,11 +4471,11 @@ }, "TargetType":{ "shape":"TargetType", - "documentation":"

Specify a target type to define the kinds of resources the document can run on. For example, to run a document on EC2 instances, specify the following value: /AWS::EC2::Instance. If you specify a value of '/' the document can run on all types of resources. If you don't specify a value, the document can't run on any resources. For a list of valid resource types, see AWS resource and property types reference in the AWS CloudFormation User Guide.

" + "documentation":"

Specify a target type to define the kinds of resources the document can run on. For example, to run a document on EC2 instances, specify the following value: /AWS::EC2::Instance. If you specify a value of '/' the document can run on all types of resources. If you don't specify a value, the document can't run on any resources. For a list of valid resource types, see Amazon Web Services resource and property types reference in the CloudFormation User Guide.

" }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an SSM document to identify the types of targets or the environment where it will run. In this case, you could specify the following key name/value pairs:

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

To add tags to an existing SSM document, use the AddTagsToResource action.

" + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an SSM document to identify the types of targets or the environment where it will run. In this case, you could specify the following key-value pairs:

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

To add tags to an existing SSM document, use the AddTagsToResource operation.

" } } }, @@ -4484,7 +4484,7 @@ "members":{ "DocumentDescription":{ "shape":"DocumentDescription", - "documentation":"

Information about the Systems Manager document.

" + "documentation":"

Information about the SSM document.

" } } }, @@ -4508,11 +4508,11 @@ }, "StartDate":{ "shape":"MaintenanceWindowStringDateTime", - "documentation":"

The date and time, in ISO-8601 Extended format, for when you want the maintenance window to become active. StartDate allows you to delay activation of the maintenance window until the specified future date.

" + "documentation":"

The date and time, in ISO-8601 Extended format, for when you want the maintenance window to become active. StartDate allows you to delay activation of the maintenance window until the specified future date.

" }, "EndDate":{ "shape":"MaintenanceWindowStringDateTime", - "documentation":"

The date and time, in ISO-8601 Extended format, for when you want the maintenance window to become inactive. EndDate allows you to set a date and time in the future when the maintenance window will no longer run.

" + "documentation":"

The date and time, in ISO-8601 Extended format, for when you want the maintenance window to become inactive. EndDate allows you to set a date and time in the future when the maintenance window will no longer run.

" }, "Schedule":{ "shape":"MaintenanceWindowSchedule", @@ -4524,7 +4524,7 @@ }, "ScheduleOffset":{ "shape":"MaintenanceWindowOffset", - "documentation":"

The number of days to wait after the date and time specified by a CRON expression before running the maintenance window.

For example, the following cron expression schedules a maintenance window to run on the third Tuesday of every month at 11:30 PM.

cron(30 23 ? * TUE#3 *)

If the schedule offset is 2, the maintenance window won't run until two days later.

", + "documentation":"

The number of days to wait after the date and time specified by a cron expression before running the maintenance window.

For example, the following cron expression schedules a maintenance window to run on the third Tuesday of every month at 11:30 PM.

cron(30 23 ? * TUE#3 *)

If the schedule offset is 2, the maintenance window won't run until two days later.

", "box":true }, "Duration":{ @@ -4533,11 +4533,11 @@ }, "Cutoff":{ "shape":"MaintenanceWindowCutoff", - "documentation":"

The number of hours before the end of the maintenance window that Systems Manager stops scheduling new tasks for execution.

" + "documentation":"

The number of hours before the end of the maintenance window that Amazon Web Services Systems Manager stops scheduling new tasks for execution.

" }, "AllowUnassociatedTargets":{ "shape":"MaintenanceWindowAllowUnassociatedTargets", - "documentation":"

Enables a maintenance window task to run on managed instances, even if you have not registered those instances as targets. If enabled, then you must specify the unregistered instances (by instance ID) when you register a task with the maintenance window.

If you don't enable this option, then you must specify previously-registered targets when you register a task with the maintenance window.

" + "documentation":"

Enables a maintenance window task to run on managed instances, even if you haven't registered those instances as targets. If enabled, then you must specify the unregistered instances (by instance ID) when you register a task with the maintenance window.

If you don't enable this option, then you must specify previously-registered targets when you register a task with the maintenance window.

" }, "ClientToken":{ "shape":"ClientToken", @@ -4546,7 +4546,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a maintenance window to identify the type of tasks it will run, the types of targets, and the environment it will run in. In this case, you could specify the following key name/value pairs:

  • Key=TaskType,Value=AgentUpdate

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

To add tags to an existing maintenance window, use the AddTagsToResource action.

" + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a maintenance window to identify the type of tasks it will run, the types of targets, and the environment it will run in. In this case, you could specify the following key-value pairs:

  • Key=TaskType,Value=AgentUpdate

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

To add tags to an existing maintenance window, use the AddTagsToResource operation.

" } } }, @@ -4577,7 +4577,7 @@ }, "OperationalData":{ "shape":"OpsItemOperationalData", - "documentation":"

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems manually in the AWS Systems Manager User Guide.

" + "documentation":"

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API operation) can view and search on the specified data. Operational data that isn't searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API operation).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view Amazon Web Services CLI example commands that use these keys, see Creating OpsItems manually in the Amazon Web Services Systems Manager User Guide.

" }, "Notifications":{ "shape":"OpsItemNotifications", @@ -4593,7 +4593,7 @@ }, "Source":{ "shape":"OpsItemSource", - "documentation":"

The origin of the OpsItem, such as Amazon EC2 or Systems Manager.

The source name can't contain the following strings: aws, amazon, and amzn.

" + "documentation":"

The origin of the OpsItem, such as Amazon EC2 or Systems Manager.

The source name can't contain the following strings: aws, amazon, and amzn.

" }, "Title":{ "shape":"OpsItemTitle", @@ -4601,7 +4601,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. You can restrict access to OpsItems by using an inline IAM policy that specifies tags. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Tags use a key-value pair. For example:

Key=Department,Value=Finance

To add tags to an existing OpsItem, use the AddTagsToResource action.

" + "documentation":"

Optional metadata that you assign to a resource. You can restrict access to OpsItems by using an inline IAM policy that specifies tags. For more information, see Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide.

Tags use a key-value pair. For example:

Key=Department,Value=Finance

To add tags to a new OpsItem, a user must have IAM permissions for both the ssm:CreateOpsItems operation and the ssm:AddTagsToResource operation. To add tags to an existing OpsItem, use the AddTagsToResource operation.

" }, "Category":{ "shape":"OpsItemCategory", @@ -4652,7 +4652,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. You can specify a maximum of five tags for an OpsMetadata object. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an OpsMetadata object to identify an environment or target AWS Region. In this case, you could specify the following key-value pairs:

  • Key=Environment,Value=Production

  • Key=Region,Value=us-east-2

" + "documentation":"

Optional metadata that you assign to a resource. You can specify a maximum of five tags for an OpsMetadata object. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an OpsMetadata object to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs:

  • Key=Environment,Value=Production

  • Key=Region,Value=us-east-2

" } } }, @@ -4671,7 +4671,7 @@ "members":{ "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

Defines the operating system the patch baseline applies to. The Default value is WINDOWS.

" + "documentation":"

Defines the operating system the patch baseline applies to. The default value is WINDOWS.

" }, "Name":{ "shape":"BaselineName", @@ -4687,24 +4687,24 @@ }, "ApprovedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", - "documentation":"

Defines the compliance level for approved patches. When an approved patch is reported as missing, this value describes the severity of the compliance violation. The default value is UNSPECIFIED.

" + "documentation":"

Defines the compliance level for approved patches. When an approved patch is reported as missing, this value describes the severity of the compliance violation. The default value is UNSPECIFIED.

" }, "ApprovedPatchesEnableNonSecurity":{ "shape":"Boolean", - "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is false. Applies to Linux instances only.

", "box":true }, "RejectedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" }, "RejectedPatchesAction":{ "shape":"PatchAction", - "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list.

  • ALLOW_AS_DEPENDENCY: A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default action if no option is specified.

  • BLOCK: Packages in the RejectedPatches list, and packages that include them as dependencies, are not installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as InstalledRejected.

" + "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list.

  • ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default action if no option is specified.

  • BLOCK : Packages in the RejectedPatches list, and packages that include them as dependencies, aren't installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as InstalledRejected.

" }, "Description":{ "shape":"BaselineDescription", @@ -4721,7 +4721,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to. In this case, you could specify the following key name/value pairs:

  • Key=PatchSeverity,Value=Critical

  • Key=OS,Value=Windows

To add tags to an existing patch baseline, use the AddTagsToResource action.

" + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to. In this case, you could specify the following key-value pairs:

  • Key=PatchSeverity,Value=Critical

  • Key=OS,Value=Windows

To add tags to an existing patch baseline, use the AddTagsToResource operation.

" } } }, @@ -4748,7 +4748,7 @@ }, "SyncType":{ "shape":"ResourceDataSyncType", - "documentation":"

Specify SyncToDestination to create a resource data sync that synchronizes data to an S3 bucket for Inventory. If you specify SyncToDestination, you must provide a value for S3Destination. Specify SyncFromSource to synchronize data from a single account and multiple Regions, or multiple AWS accounts and Regions, as listed in AWS Organizations for Explorer. If you specify SyncFromSource, you must provide a value for SyncSource. The default value is SyncToDestination.

" + "documentation":"

Specify SyncToDestination to create a resource data sync that synchronizes data to an S3 bucket for Inventory. If you specify SyncToDestination, you must provide a value for S3Destination. Specify SyncFromSource to synchronize data from a single account and multiple Regions, or multiple Amazon Web Services accounts and Amazon Web Services Regions, as listed in Organizations for Explorer. If you specify SyncFromSource, you must provide a value for SyncSource. The default value is SyncToDestination.

" }, "SyncSource":{ "shape":"ResourceDataSyncSource", @@ -4798,11 +4798,11 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The ID of the instance.

" + "documentation":"

The instance ID.

InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. Requests that include the parameter InstanceID with Systems Manager documents (SSM documents) that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you can't use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter.

" }, "AssociationId":{ "shape":"AssociationId", @@ -4833,7 +4833,7 @@ }, "Force":{ "shape":"Boolean", - "documentation":"

Some SSM document types require that you specify a Force flag before you can delete the document. For example, you must specify a Force flag to delete a document of type ApplicationConfigurationSchema. You can restrict access to the Force flag in an AWS Identity and Access Management (IAM) policy.

" + "documentation":"

Some SSM document types require that you specify a Force flag before you can delete the document. For example, you must specify a Force flag to delete a document of type ApplicationConfigurationSchema. You can restrict access to the Force flag in an Identity and Access Management (IAM) policy.

" } } }, @@ -4852,7 +4852,7 @@ }, "SchemaDeleteOption":{ "shape":"InventorySchemaDeleteOption", - "documentation":"

Use the SchemaDeleteOption to delete a custom inventory type (schema). If you don't choose this option, the system only deletes existing inventory data associated with the custom inventory type. Choose one of the following options:

DisableSchema: If you choose this option, the system ignores all inventory data for the specified version, and any earlier versions. To enable this schema again, you must call the PutInventory action for a version greater than the disabled version.

DeleteSchema: This option deletes the specified custom type from the Inventory service. You can recreate the schema later, if you want.

" + "documentation":"

Use the SchemaDeleteOption to delete a custom inventory type (schema). If you don't choose this option, the system only deletes existing inventory data associated with the custom inventory type. Choose one of the following options:

DisableSchema: If you choose this option, the system ignores all inventory data for the specified version, and any earlier versions. To enable this schema again, you must call the PutInventory operation for a version greater than the disabled version.

DeleteSchema: This option deletes the specified custom type from the Inventory service. You can recreate the schema later, if you want.

" }, "DryRun":{ "shape":"DryRun", @@ -4870,7 +4870,7 @@ "members":{ "DeletionId":{ "shape":"UUID", - "documentation":"

Every DeleteInventory action is assigned a unique ID. This option returns a unique ID. You can use this ID to query the status of a delete operation. This option is useful for ensuring that a delete operation has completed before you begin other actions.

" + "documentation":"

Every DeleteInventory operation is assigned a unique ID. This option returns a unique ID. You can use this ID to query the status of a delete operation. This option is useful for ensuring that a delete operation has completed before you begin other operations.

" }, "TypeName":{ "shape":"InventoryItemTypeName", @@ -4878,7 +4878,7 @@ }, "DeletionSummary":{ "shape":"InventoryDeletionSummary", - "documentation":"

A summary of the delete operation. For more information about this summary, see Deleting custom inventory in the AWS Systems Manager User Guide.

" + "documentation":"

A summary of the delete operation. For more information about this summary, see Deleting custom inventory in the Amazon Web Services Systems Manager User Guide.

" } } }, @@ -4937,7 +4937,7 @@ "members":{ "Names":{ "shape":"ParameterNameList", - "documentation":"

The names of the parameters to delete.

" + "documentation":"

The names of the parameters to delete. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

" } } }, @@ -4950,7 +4950,7 @@ }, "InvalidParameters":{ "shape":"ParameterNameList", - "documentation":"

The names of parameters that weren't deleted because the parameters are not valid.

" + "documentation":"

The names of parameters that weren't deleted because the parameters aren't valid.

" } } }, @@ -5055,7 +5055,7 @@ }, "Safe":{ "shape":"Boolean", - "documentation":"

The system checks if the target is being referenced by a task. If the target is being referenced, the system returns an error and does not deregister the target from the maintenance window.

", + "documentation":"

The system checks if the target is being referenced by a task. If the target is being referenced, the system returns an error and doesn't deregister the target from the maintenance window.

", "box":true } } @@ -5152,7 +5152,7 @@ "members":{ "ActivationList":{ "shape":"ActivationList", - "documentation":"

A list of activations for your AWS account.

" + "documentation":"

A list of activations for your Amazon Web Services account.

" }, "NextToken":{ "shape":"NextToken", @@ -5244,7 +5244,7 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "InstanceId":{ "shape":"InstanceId", @@ -5346,7 +5346,7 @@ "members":{ "Filters":{ "shape":"PatchOrchestratorFilterList", - "documentation":"

Filters used to scope down the returned patches.

" + "documentation":"

Each element in the array is a structure containing a key-value pair.

Windows Server

Supported keys for Windows Server instance patches include the following:

  • PATCH_SET

    Sample values: OS | APPLICATION

  • PRODUCT

    Sample values: WindowsServer2012 | Office 2010 | MicrosoftDefenderAntivirus

  • PRODUCT_FAMILY

    Sample values: Windows | Office

  • MSRC_SEVERITY

    Sample values: ServicePacks | Important | Moderate

  • CLASSIFICATION

    Sample values: ServicePacks | SecurityUpdates | DefinitionUpdates

  • PATCH_ID

    Sample values: KB123456 | KB4516046

Linux

When specifying filters for Linux patches, you must specify a key-pair for PRODUCT. For example, using the Command Line Interface (CLI), the following command fails:

aws ssm describe-available-patches --filters Key=CVE_ID,Values=CVE-2018-3615

However, the following command succeeds:

aws ssm describe-available-patches --filters Key=PRODUCT,Values=AmazonLinux2018.03 Key=CVE_ID,Values=CVE-2018-3615

Supported keys for Linux instance patches include the following:

  • PRODUCT

    Sample values: AmazonLinux2018.03 | AmazonLinux2.0

  • NAME

    Sample values: kernel-headers | samba-python | php

  • SEVERITY

    Sample values: Critical | Important | Medium | Low

  • EPOCH

    Sample values: 0 | 1

  • VERSION

    Sample values: 78.6.1 | 4.10.16

  • RELEASE

    Sample values: 9.56.amzn1 | 1.amzn2

  • ARCH

    Sample values: i686 | x86_64

  • REPOSITORY

    Sample values: Core | Updates

  • ADVISORY_ID

    Sample values: ALAS-2018-1058 | ALAS2-2021-1594

  • CVE_ID

    Sample values: CVE-2018-3615 | CVE-2020-1472

  • BUGZILLA_ID

    Sample values: 1463241

" }, "MaxResults":{ "shape":"PatchBaselineMaxResults", @@ -5403,11 +5403,11 @@ "members":{ "AccountIds":{ "shape":"AccountIdList", - "documentation":"

The account IDs that have permission to use this document. The ID can be either an AWS account or All.

" + "documentation":"

The account IDs that have permission to use this document. The ID can be either an Amazon Web Services account or All.

" }, "AccountSharingInfoList":{ "shape":"AccountSharingInfoList", - "documentation":"

A list of AWS accounts where the current document is shared and the version shared with each account.

" + "documentation":"

A list of Amazon Web Services accounts where the current document is shared and the version shared with each account.

" }, "NextToken":{ "shape":"NextToken", @@ -5421,7 +5421,7 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -5429,7 +5429,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and cannot be changed.

" + "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" } } }, @@ -5438,7 +5438,7 @@ "members":{ "Document":{ "shape":"DocumentDescription", - "documentation":"

Information about the Systems Manager document.

" + "documentation":"

Information about the SSM document.

" } } }, @@ -5583,7 +5583,7 @@ }, "Filters":{ "shape":"InstancePatchStateFilterList", - "documentation":"

Each entry in the array is a structure containing:

Key (string between 1 and 200 characters)

Values (array containing a single string)

Type (string \"Equal\", \"NotEqual\", \"LessThan\", \"GreaterThan\")

" + "documentation":"

Each entry in the array is a structure containing:

  • Key (string between 1 and 200 characters)

  • Values (array containing a single string)

  • Type (string \"Equal\", \"NotEqual\", \"LessThan\", \"GreaterThan\")

" }, "NextToken":{ "shape":"NextToken", @@ -5615,7 +5615,7 @@ "members":{ "InstanceIds":{ "shape":"InstanceIdList", - "documentation":"

The ID of the instance whose patch state information should be retrieved.

" + "documentation":"

The ID of the instance for which patch state information should be retrieved.

" }, "NextToken":{ "shape":"NextToken", @@ -5651,7 +5651,7 @@ }, "Filters":{ "shape":"PatchOrchestratorFilterList", - "documentation":"

An array of structures. Each entry in the array is a structure containing a Key, Value combination. Valid values for Key are Classification | KBId | Severity | State.

" + "documentation":"

Each element in the array is a structure containing a key-value pair.

Supported keys for DescribeInstancePatches include the following:

  • Classification

    Sample values: Security | SecurityUpdates

  • KBId

    Sample values: KB4480056 | java-1.7.0-openjdk.x86_64

  • Severity

    Sample values: Important | Medium | Low

  • State

    Sample values: Installed | InstalledOther | InstalledPendingReboot

" }, "NextToken":{ "shape":"NextToken", @@ -5669,7 +5669,7 @@ "members":{ "Patches":{ "shape":"PatchComplianceDataList", - "documentation":"

Each entry in the array is a structure containing:

Title (string)

KBId (string)

Classification (string)

Severity (string)

State (string, such as \"INSTALLED\" or \"FAILED\")

InstalledTime (DateTime)

InstalledBy (string)

" + "documentation":"

Each entry in the array is a structure containing:

  • Title (string)

  • KBId (string)

  • Classification (string)

  • Severity (string)

  • State (string, such as \"INSTALLED\" or \"FAILED\")

  • InstalledTime (DateTime)

  • InstalledBy (string)

" }, "NextToken":{ "shape":"NextToken", @@ -5682,7 +5682,7 @@ "members":{ "DeletionId":{ "shape":"UUID", - "documentation":"

Specify the delete inventory ID for which you want information. This ID was returned by the DeleteInventory action.

" + "documentation":"

Specify the delete inventory ID for which you want information. This ID was returned by the DeleteInventory operation.

" }, "NextToken":{ "shape":"NextToken", @@ -5725,7 +5725,7 @@ }, "Filters":{ "shape":"MaintenanceWindowFilterList", - "documentation":"

Optional filters used to scope down the returned task invocations. The supported filter key is STATUS with the corresponding values PENDING, IN_PROGRESS, SUCCESS, FAILED, TIMED_OUT, CANCELLING, and CANCELLED.

" + "documentation":"

Optional filters used to scope down the returned task invocations. The supported filter key is STATUS with the corresponding values PENDING, IN_PROGRESS, SUCCESS, FAILED, TIMED_OUT, CANCELLING, and CANCELLED.

" }, "MaxResults":{ "shape":"MaintenanceWindowMaxResults", @@ -5761,7 +5761,7 @@ }, "Filters":{ "shape":"MaintenanceWindowFilterList", - "documentation":"

Optional filters used to scope down the returned tasks. The supported filter key is STATUS with the corresponding values PENDING, IN_PROGRESS, SUCCESS, FAILED, TIMED_OUT, CANCELLING, and CANCELLED.

" + "documentation":"

Optional filters used to scope down the returned tasks. The supported filter key is STATUS with the corresponding values PENDING, IN_PROGRESS, SUCCESS, FAILED, TIMED_OUT, CANCELLING, and CANCELLED.

" }, "MaxResults":{ "shape":"MaintenanceWindowMaxResults", @@ -5797,7 +5797,7 @@ }, "Filters":{ "shape":"MaintenanceWindowFilterList", - "documentation":"

Each entry in the array is a structure containing:

Key (string, between 1 and 128 characters)

Values (array of strings, each string is between 1 and 256 characters)

The supported Keys are ExecutedBefore and ExecutedAfter with the value being a date/time string such as 2016-11-04T05:00:00Z.

" + "documentation":"

Each entry in the array is a structure containing:

  • Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter.

  • Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2021-11-04T05:00:00Z.

" }, "MaxResults":{ "shape":"MaintenanceWindowMaxResults", @@ -5832,11 +5832,11 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The instance ID or key/value pair to retrieve information about.

" + "documentation":"

The instance ID or key-value pair to retrieve information about.

" }, "ResourceType":{ "shape":"MaintenanceWindowResourceType", - "documentation":"

The type of resource you want to retrieve information about. For example, \"INSTANCE\".

" + "documentation":"

The type of resource you want to retrieve information about. For example, INSTANCE.

" }, "Filters":{ "shape":"PatchOrchestratorFilterList", @@ -5876,7 +5876,7 @@ }, "Filters":{ "shape":"MaintenanceWindowFilterList", - "documentation":"

Optional filters that can be used to narrow down the scope of the returned window targets. The supported filter keys are Type, WindowTargetId and OwnerInformation.

" + "documentation":"

Optional filters that can be used to narrow down the scope of the returned window targets. The supported filter keys are Type, WindowTargetId, and OwnerInformation.

" }, "MaxResults":{ "shape":"MaintenanceWindowMaxResults", @@ -5912,7 +5912,7 @@ }, "Filters":{ "shape":"MaintenanceWindowFilterList", - "documentation":"

Optional filters used to narrow down the scope of the returned tasks. The supported filter keys are WindowTaskId, TaskArn, Priority, and TaskType.

" + "documentation":"

Optional filters used to narrow down the scope of the returned tasks. The supported filter keys are WindowTaskId, TaskArn, Priority, and TaskType.

" }, "MaxResults":{ "shape":"MaintenanceWindowMaxResults", @@ -5947,11 +5947,11 @@ "members":{ "Targets":{ "shape":"Targets", - "documentation":"

The instance ID or key/value pair to retrieve information about.

" + "documentation":"

The instance ID or key-value pair to retrieve information about.

" }, "ResourceType":{ "shape":"MaintenanceWindowResourceType", - "documentation":"

The type of resource you want to retrieve information about. For example, \"INSTANCE\".

" + "documentation":"

The type of resource you want to retrieve information about. For example, INSTANCE.

" }, "MaxResults":{ "shape":"MaintenanceWindowSearchMaxResults", @@ -5982,7 +5982,7 @@ "members":{ "Filters":{ "shape":"MaintenanceWindowFilterList", - "documentation":"

Optional filters used to narrow down the scope of the returned maintenance windows. Supported filter keys are Name and Enabled.

" + "documentation":"

Optional filters used to narrow down the scope of the returned maintenance windows. Supported filter keys are Name and Enabled. For example, Name=MyMaintenanceWindow and Enabled=True.

" }, "MaxResults":{ "shape":"MaintenanceWindowMaxResults", @@ -6013,7 +6013,7 @@ "members":{ "OpsItemFilters":{ "shape":"OpsItemFilters", - "documentation":"

One or more filters to limit the response.

  • Key: CreatedTime

    Operations: GreaterThan, LessThan

  • Key: LastModifiedBy

    Operations: Contains, Equals

  • Key: LastModifiedTime

    Operations: GreaterThan, LessThan

  • Key: Priority

    Operations: Equals

  • Key: Source

    Operations: Contains, Equals

  • Key: Status

    Operations: Equals

  • Key: Title

    Operations: Contains

  • Key: OperationalData*

    Operations: Equals

  • Key: OperationalDataKey

    Operations: Equals

  • Key: OperationalDataValue

    Operations: Equals, Contains

  • Key: OpsItemId

    Operations: Equals

  • Key: ResourceId

    Operations: Contains

  • Key: AutomationId

    Operations: Equals

*If you filter the response by using the OperationalData operator, specify a key-value pair by using the following JSON format: {\"key\":\"key_name\",\"value\":\"a_value\"}

" + "documentation":"

One or more filters to limit the response.

  • Key: CreatedTime

    Operations: GreaterThan, LessThan

  • Key: LastModifiedBy

    Operations: Contains, Equals

  • Key: LastModifiedTime

    Operations: GreaterThan, LessThan

  • Key: Priority

    Operations: Equals

  • Key: Source

    Operations: Contains, Equals

  • Key: Status

    Operations: Equals

  • Key: Title*

    Operations: Equals, Contains

  • Key: OperationalData**

    Operations: Equals

  • Key: OperationalDataKey

    Operations: Equals

  • Key: OperationalDataValue

    Operations: Equals, Contains

  • Key: OpsItemId

    Operations: Equals

  • Key: ResourceId

    Operations: Contains

  • Key: AutomationId

    Operations: Equals

*The Equals operator for Title matches the first 100 characters. If you specify more than 100 characters, the system returns an error that the filter value exceeds the length limit.

**If you filter the response by using the OperationalData operator, specify a key-value pair by using the following JSON format: {\"key\":\"key_name\",\"value\":\"a_value\"}

" }, "MaxResults":{ "shape":"OpsItemMaxResults", @@ -6078,7 +6078,7 @@ "members":{ "Filters":{ "shape":"PatchOrchestratorFilterList", - "documentation":"

Each element in the array is a structure containing:

Key: (string, \"NAME_PREFIX\" or \"OWNER\")

Value: (array of strings, exactly 1 entry, between 1 and 255 characters)

" + "documentation":"

Each element in the array is a structure containing a key-value pair.

Supported keys for DescribePatchBaselines include the following:

  • NAME_PREFIX

    Sample values: AWS- | My-

  • OWNER

    Sample values: AWS | Self

  • OPERATING_SYSTEM

    Sample values: AMAZON_LINUX | SUSE | WINDOWS

" }, "MaxResults":{ "shape":"PatchBaselineMaxResults", @@ -6096,7 +6096,7 @@ "members":{ "BaselineIdentities":{ "shape":"PatchBaselineIdentityList", - "documentation":"

An array of PatchBaselineIdentity elements.

" + "documentation":"

An array of PatchBaselineIdentity elements.

" }, "NextToken":{ "shape":"NextToken", @@ -6131,12 +6131,12 @@ }, "InstancesWithInstalledPendingRebootPatches":{ "shape":"InstancesCount", - "documentation":"

The number of instances with patches installed by Patch Manager that have not been rebooted after the patch installation. The status of these instances is NON_COMPLIANT.

", + "documentation":"

The number of instances with patches installed by Patch Manager that haven't been rebooted after the patch installation. The status of these instances is NON_COMPLIANT.

", "box":true }, "InstancesWithInstalledRejectedPatches":{ "shape":"InstancesCount", - "documentation":"

The number of instances with patches installed that are specified in a RejectedPatches list. Patches with a status of INSTALLED_REJECTED were typically installed before they were added to a RejectedPatches list.

If ALLOW_AS_DEPENDENCY is the specified option for RejectedPatchesAction, the value of InstancesWithInstalledRejectedPatches will always be 0 (zero).

", + "documentation":"

The number of instances with patches installed that are specified in a RejectedPatches list. Patches with a status of INSTALLED_REJECTED were typically installed before they were added to a RejectedPatches list.

If ALLOW_AS_DEPENDENCY is the specified option for RejectedPatchesAction, the value of InstancesWithInstalledRejectedPatches will always be 0 (zero).

", "box":true }, "InstancesWithMissingPatches":{ @@ -6153,22 +6153,22 @@ }, "InstancesWithUnreportedNotApplicablePatches":{ "shape":"Integer", - "documentation":"

The number of instances with NotApplicable patches beyond the supported limit, which are not reported by name to Systems Manager Inventory.

", + "documentation":"

The number of instances with NotApplicable patches beyond the supported limit, which aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.

", "box":true }, "InstancesWithCriticalNonCompliantPatches":{ "shape":"InstancesCount", - "documentation":"

The number of instances where patches that are specified as \"Critical\" for compliance reporting in the patch baseline are not installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required instance reboot. The status of these instances is NON_COMPLIANT.

", + "documentation":"

The number of instances where patches that are specified as Critical for compliance reporting in the patch baseline aren't installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required instance reboot. The status of these instances is NON_COMPLIANT.

", "box":true }, "InstancesWithSecurityNonCompliantPatches":{ "shape":"InstancesCount", - "documentation":"

The number of instances where patches that are specified as \"Security\" in a patch advisory are not installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required instance reboot. The status of these instances is NON_COMPLIANT.

", + "documentation":"

The number of instances where patches that are specified as Security in a patch advisory aren't installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required instance reboot. The status of these instances is NON_COMPLIANT.

", "box":true }, "InstancesWithOtherNonCompliantPatches":{ "shape":"InstancesCount", - "documentation":"

The number of instances with patches installed that are specified as other than \"Critical\" or \"Security\" but are not compliant with the patch baseline. The status of these instances is NON_COMPLIANT.

", + "documentation":"

The number of instances with patches installed that are specified as other than Critical or Security but aren't compliant with the patch baseline. The status of these instances is NON_COMPLIANT.

", "box":true } } @@ -6183,7 +6183,7 @@ }, "Filters":{ "shape":"PatchOrchestratorFilterList", - "documentation":"

One or more filters. Use a filter to return a more specific list of results.

For DescribePatchGroups,valid filter keys include the following:

  • NAME_PREFIX: The name of the patch group. Wildcards (*) are accepted.

  • OPERATING_SYSTEM: The supported operating system type to return results for. For valid operating system values, see GetDefaultPatchBaselineRequest$OperatingSystem in CreatePatchBaseline.

    Examples:

    • --filters Key=NAME_PREFIX,Values=MyPatchGroup*

    • --filters Key=OPERATING_SYSTEM,Values=AMAZON_LINUX_2

" + "documentation":"

Each element in the array is a structure containing a key-value pair.

Supported keys for DescribePatchGroups include the following:

  • NAME_PREFIX

    Sample values: AWS- | My-

  • OPERATING_SYSTEM

    Sample values: AMAZON_LINUX | SUSE | WINDOWS

" }, "NextToken":{ "shape":"NextToken", @@ -6196,7 +6196,7 @@ "members":{ "Mappings":{ "shape":"PatchGroupPatchBaselineMappingList", - "documentation":"

Each entry in the array contains:

PatchGroup: string (between 1 and 256 characters, Regex: ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$)

PatchBaselineIdentity: A PatchBaselineIdentity element.

" + "documentation":"

Each entry in the array contains:

  • PatchGroup: string (between 1 and 256 characters. Regex: ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$)

  • PatchBaselineIdentity: A PatchBaselineIdentity element.

" }, "NextToken":{ "shape":"NextToken", @@ -6221,7 +6221,7 @@ }, "PatchSet":{ "shape":"PatchSet", - "documentation":"

Indicates whether to list patches for the Windows operating system or for Microsoft applications. Not applicable for the Linux or macOS operating systems.

" + "documentation":"

Indicates whether to list patches for the Windows operating system or for applications released by Microsoft. Not applicable for the Linux or macOS operating systems.

" }, "MaxResults":{ "shape":"MaxResults", @@ -6358,11 +6358,11 @@ }, "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "DisplayName":{ "shape":"DocumentDisplayName", - "documentation":"

The friendly name of the Systems Manager document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument.

" + "documentation":"

The friendly name of the SSM document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument.

" }, "VersionName":{ "shape":"DocumentVersionName", @@ -6370,7 +6370,7 @@ }, "Owner":{ "shape":"DocumentOwner", - "documentation":"

The AWS user account that created the document.

" + "documentation":"

The Amazon Web Services user account that created the document.

" }, "CreatedDate":{ "shape":"DateTime", @@ -6378,11 +6378,11 @@ }, "Status":{ "shape":"DocumentStatus", - "documentation":"

The status of the Systems Manager document.

" + "documentation":"

The status of the SSM document.

" }, "StatusInformation":{ "shape":"DocumentStatusInformation", - "documentation":"

A message returned by AWS Systems Manager that explains the Status value. For example, a Failed status might be explained by the StatusInformation message, \"The specified S3 bucket does not exist. Verify that the URL of the S3 bucket is correct.\"

" + "documentation":"

A message returned by Amazon Web Services Systems Manager that explains the Status value. For example, a Failed status might be explained by the StatusInformation message, \"The specified S3 bucket doesn't exist. Verify that the URL of the S3 bucket is correct.\"

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -6398,7 +6398,7 @@ }, "PlatformTypes":{ "shape":"PlatformTypeList", - "documentation":"

The list of OS platforms compatible with this Systems Manager document.

" + "documentation":"

The list of OS platforms compatible with this SSM document.

" }, "DocumentType":{ "shape":"DocumentType", @@ -6422,7 +6422,7 @@ }, "TargetType":{ "shape":"TargetType", - "documentation":"

The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see AWS resource and property types reference in the AWS CloudFormation User Guide.

" + "documentation":"

The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see Amazon Web Services resource and property types reference in the CloudFormation User Guide.

" }, "Tags":{ "shape":"TagList", @@ -6457,7 +6457,7 @@ "documentation":"

The current status of the review.

" } }, - "documentation":"

Describes a Systems Manager document.

" + "documentation":"

Describes an Amazon Web Services Systems Manager document (SSM document).

" }, "DocumentDisplayName":{ "type":"string", @@ -6524,23 +6524,23 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "CreatedDate":{ "shape":"DateTime", - "documentation":"

The date the Systems Manager document was created.

" + "documentation":"

The date the SSM document was created.

" }, "DisplayName":{ "shape":"DocumentDisplayName", - "documentation":"

An optional field where you can specify a friendly name for the Systems Manager document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument.

" + "documentation":"

An optional field where you can specify a friendly name for the SSM document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument.

" }, "Owner":{ "shape":"DocumentOwner", - "documentation":"

The AWS user account that created the document.

" + "documentation":"

The Amazon Web Services user account that created the document.

" }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and cannot be changed.

" + "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" }, "PlatformTypes":{ "shape":"PlatformTypeList", @@ -6564,7 +6564,7 @@ }, "TargetType":{ "shape":"TargetType", - "documentation":"

The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see AWS resource and property types reference in the AWS CloudFormation User Guide.

" + "documentation":"

The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see Amazon Web Services resource and property types reference in the CloudFormation User Guide.

" }, "Tags":{ "shape":"TagList", @@ -6583,7 +6583,7 @@ "documentation":"

The user in your organization who created the document.

" } }, - "documentation":"

Describes the name of a Systems Manager document.

" + "documentation":"

Describes the name of an SSM document.

" }, "DocumentIdentifierList":{ "type":"list", @@ -6601,7 +6601,7 @@ "documentation":"

The value for the filter key.

" } }, - "documentation":"

One or more filters. Use a filter to return a more specific list of documents.

For keys, you can specify one or more tags that have been applied to a document.

You can also use AWS-provided keys, some of which have specific allowed values. These keys and their associated values are as follows:

DocumentType
  • ApplicationConfiguration

  • ApplicationConfigurationSchema

  • Automation

  • ChangeCalendar

  • Command

  • DeploymentStrategy

  • Package

  • Policy

  • Session

Owner

Note that only one Owner can be specified in a request. For example: Key=Owner,Values=Self.

  • Amazon

  • Private

  • Public

  • Self

  • ThirdParty

PlatformTypes
  • Linux

  • Windows

Name is another AWS-provided key. If you use Name as a key, you can use a name prefix to return a list of documents. For example, in the AWS CLI, to return a list of all documents that begin with Te, run the following command:

aws ssm list-documents --filters Key=Name,Values=Te

You can also use the TargetType AWS-provided key. For a list of valid resource type values that can be used with this key, see AWS resource and property types reference in the AWS CloudFormation User Guide.

If you specify more than two keys, only documents that are identified by all the tags are returned in the results. If you specify more than two values for a key, documents that are identified by any of the values are returned in the results.

To specify a custom key and value pair, use the format Key=tag:tagName,Values=valueName.

For example, if you created a key called region and are using the AWS CLI to call the list-documents command:

aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self

" + "documentation":"

One or more filters. Use a filter to return a more specific list of documents.

For keys, you can specify one or more tags that have been applied to a document.

You can also use Amazon Web Services-provided keys, some of which have specific allowed values. These keys and their associated values are as follows:

DocumentType
  • ApplicationConfiguration

  • ApplicationConfigurationSchema

  • Automation

  • ChangeCalendar

  • Command

  • DeploymentStrategy

  • Package

  • Policy

  • Session

Owner

Note that only one Owner can be specified in a request. For example: Key=Owner,Values=Self.

  • Amazon

  • Private

  • Public

  • Self

  • ThirdParty

PlatformTypes
  • Linux

  • Windows

Name is another Amazon Web Services-provided key. If you use Name as a key, you can use a name prefix to return a list of documents. For example, in the Amazon Web Services CLI, to return a list of all documents that begin with Te, run the following command:

aws ssm list-documents --filters Key=Name,Values=Te

You can also use the TargetType Amazon Web Services-provided key. For a list of valid resource type values that can be used with this key, see Amazon Web Services resource and property types reference in the CloudFormation User Guide.

If you specify more than two keys, only documents that are identified by all the tags are returned in the results. If you specify more than two values for a key, documents that are identified by any of the values are returned in the results.

To specify a custom key-value pair, use the format Key=tag:tagName,Values=valueName.

For example, if you created a key called region and are using the Amazon Web Services CLI to call the list-documents command:

aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self

" }, "DocumentKeyValuesFilterKey":{ "type":"string", @@ -6628,7 +6628,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

You can have at most 500 active Systems Manager documents.

", + "documentation":"

You can have at most 500 active SSM documents.

", "exception":true }, "DocumentMetadataEnum":{ @@ -6691,7 +6691,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The document cannot be shared with more AWS user accounts. You can share a document with a maximum of 20 accounts. You can publicly share up to five documents. If you need to increase this limit, contact AWS Support.

", + "documentation":"

The document can't be shared with more Amazon Web Services user accounts. You can share a document with a maximum of 20 accounts. You can publicly share up to five documents. If you need to increase this limit, contact Amazon Web Services Support.

", "exception":true }, "DocumentPermissionMaxResults":{ @@ -6854,7 +6854,7 @@ }, "DisplayName":{ "shape":"DocumentDisplayName", - "documentation":"

The friendly name of the Systems Manager document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument.

" + "documentation":"

The friendly name of the SSM document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -6862,7 +6862,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

The version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and cannot be changed.

" + "documentation":"

The version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" }, "CreatedDate":{ "shape":"DateTime", @@ -6878,11 +6878,11 @@ }, "Status":{ "shape":"DocumentStatus", - "documentation":"

The status of the Systems Manager document, such as Creating, Active, Failed, and Deleting.

" + "documentation":"

The status of the SSM document, such as Creating, Active, Failed, and Deleting.

" }, "StatusInformation":{ "shape":"DocumentStatusInformation", - "documentation":"

A message returned by AWS Systems Manager that explains the Status value. For example, a Failed status might be explained by the StatusInformation message, \"The specified S3 bucket does not exist. Verify that the URL of the S3 bucket is correct.\"

" + "documentation":"

A message returned by Amazon Web Services Systems Manager that explains the Status value. For example, a Failed status might be explained by the StatusInformation message, \"The specified S3 bucket doesn't exist. Verify that the URL of the S3 bucket is correct.\"

" }, "ReviewStatus":{ "shape":"ReviewStatus", @@ -6917,7 +6917,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Error returned when the ID specified for a resource, such as a maintenance window or Patch baseline, doesn't exist.

For information about resource quotas in Systems Manager, see Systems Manager service quotas in the AWS General Reference.

", + "documentation":"

Error returned when the ID specified for a resource, such as a maintenance window or patch baseline, doesn't exist.

For information about resource quotas in Amazon Web Services Systems Manager, see Systems Manager service quotas in the Amazon Web Services General Reference.

", "exception":true }, "DryRun":{"type":"boolean"}, @@ -6941,7 +6941,7 @@ "type":"structure", "members":{ }, - "documentation":"

You cannot specify an instance ID in more than one association.

", + "documentation":"

You can't specify an instance ID in more than one association.

", "exception":true }, "EffectiveInstanceAssociationMaxResults":{ @@ -6961,7 +6961,7 @@ "documentation":"

The status of the patch in a patch baseline. This includes information about whether the patch is currently approved, due to be approved by a rule, explicitly approved, or explicitly rejected and the date the patch was or will be approved.

" } }, - "documentation":"

The EffectivePatch structure defines metadata about a patch along with the approval state of the patch in a particular patch baseline. The approval state includes information about whether the patch is currently approved, due to be approved by a rule, explicitly approved, or explicitly rejected and the date the patch was or will be approved.

" + "documentation":"

The EffectivePatch structure defines metadata about a patch along with the approval state of the patch in a particular patch baseline. The approval state includes information about whether the patch is currently approved, due to be approved by a rule, explicitly approved, or explicitly rejected and the date the patch was or will be approved.

" }, "EffectivePatchList":{ "type":"list", @@ -7035,7 +7035,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where the corresponding service is not available.

", + "documentation":"

You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where the corresponding service isn't available.

", "exception":true }, "GetAutomationExecutionRequest":{ @@ -7044,7 +7044,7 @@ "members":{ "AutomationExecutionId":{ "shape":"AutomationExecutionId", - "documentation":"

The unique identifier for an existing automation execution to examine. The execution ID is returned by StartAutomationExecution when the execution of an Automation document is initiated.

" + "documentation":"

The unique identifier for an existing automation execution to examine. The execution ID is returned by StartAutomationExecution when the execution of an Automation runbook is initiated.

" } } }, @@ -7063,11 +7063,11 @@ "members":{ "CalendarNames":{ "shape":"CalendarNameOrARNList", - "documentation":"

The names or Amazon Resource Names (ARNs) of the Systems Manager documents that represent the calendar entries for which you want to get the state.

" + "documentation":"

The names or Amazon Resource Names (ARNs) of the Systems Manager documents (SSM documents) that represent the calendar entries for which you want to get the state.

" }, "AtTime":{ "shape":"ISO8601String", - "documentation":"

(Optional) The specific time for which you want to get calendar state information, in ISO 8601 format. If you do not add AtTime, the current time is assumed.

" + "documentation":"

(Optional) The specific time for which you want to get calendar state information, in ISO 8601 format. If you don't specify a value for AtTime, the current time is used.

" } } }, @@ -7076,11 +7076,11 @@ "members":{ "State":{ "shape":"CalendarState", - "documentation":"

The state of the calendar. An OPEN calendar indicates that actions are allowed to proceed, and a CLOSED calendar indicates that actions are not allowed to proceed.

" + "documentation":"

The state of the calendar. An OPEN calendar indicates that actions are allowed to proceed, and a CLOSED calendar indicates that actions aren't allowed to proceed.

" }, "AtTime":{ "shape":"ISO8601String", - "documentation":"

The time, as an ISO 8601 string, that you specified in your command. If you did not specify a time, GetCalendarState uses the current time.

" + "documentation":"

The time, as an ISO 8601 string, that you specified in your command. If you don't specify a time, GetCalendarState uses the current time.

" }, "NextTransitionTime":{ "shape":"ISO8601String", @@ -7101,11 +7101,11 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

(Required) The ID of the managed instance targeted by the command. A managed instance can be an Amazon Elastic Compute Cloud (Amazon EC2) instance or an instance in your hybrid environment that is configured for AWS Systems Manager.

" + "documentation":"

(Required) The ID of the managed instance targeted by the command. A managed instance can be an Amazon Elastic Compute Cloud (Amazon EC2) instance or an instance in your hybrid environment that is configured for Amazon Web Services Systems Manager.

" }, "PluginName":{ "shape":"CommandPluginName", - "documentation":"

The name of the plugin for which you want detailed results. If the document contains only one plugin, you can omit the name and details for that plugin. If the document contains more than one plugin, you must specify the name of the plugin for which you want to view details.

Plugin names are also referred to as step names in Systems Manager documents. For example, aws:RunShellScript is a plugin.

To find the PluginName, check the document content and find the name of the plugin. Alternatively, use ListCommandInvocations with the CommandId and Details parameters. The PluginName is the Name attribute of the CommandPlugin object in the CommandPlugins list.

" + "documentation":"

The name of the plugin for which you want detailed results. If the document contains only one plugin, you can omit the name and details for that plugin. If the document contains more than one plugin, you must specify the name of the plugin for which you want to view details.

Plugin names are also referred to as step names in Systems Manager documents (SSM documents). For example, aws:RunShellScript is a plugin.

To find the PluginName, check the document content and find the name of the plugin. Alternatively, use ListCommandInvocations with the CommandId and Details parameters. The PluginName is the Name attribute of the CommandPlugin object in the CommandPlugins list.

" } } }, @@ -7130,7 +7130,7 @@ }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The SSM document version used in the request.

" + "documentation":"

The Systems Manager document (SSM document) version used in the request.

" }, "PluginName":{ "shape":"CommandPluginName", @@ -7138,11 +7138,11 @@ }, "ResponseCode":{ "shape":"ResponseCode", - "documentation":"

The error level response code for the plugin script. If the response code is -1, then the command has not started running on the instance, or it was not received by the instance.

" + "documentation":"

The error level response code for the plugin script. If the response code is -1, then the command hasn't started running on the instance, or it wasn't received by the instance.

" }, "ExecutionStartDateTime":{ "shape":"StringDateTime", - "documentation":"

The date and time the plugin started running. Date and time are written in ISO 8601 format. For example, June 7, 2017 is represented as 2017-06-7. The following sample AWS CLI command uses the InvokedBefore filter.

aws ssm list-commands --filters key=InvokedBefore,value=2017-06-07T00:00:00Z

If the plugin has not started to run, the string is empty.

" + "documentation":"

The date and time the plugin started running. Date and time are written in ISO 8601 format. For example, June 7, 2017 is represented as 2017-06-07. The following sample Amazon Web Services CLI command uses the InvokedBefore filter.

aws ssm list-commands --filters key=InvokedBefore,value=2017-06-07T00:00:00Z

If the plugin hasn't started to run, the string is empty.

" }, "ExecutionElapsedTime":{ "shape":"StringDateTime", @@ -7150,7 +7150,7 @@ }, "ExecutionEndDateTime":{ "shape":"StringDateTime", - "documentation":"

The date and time the plugin finished running. Date and time are written in ISO 8601 format. For example, June 7, 2017 is represented as 2017-06-7. The following sample AWS CLI command uses the InvokedAfter filter.

aws ssm list-commands --filters key=InvokedAfter,value=2017-06-07T00:00:00Z

If the plugin has not started to run, the string is empty.

" + "documentation":"

The date and time the plugin finished running. Date and time are written in ISO 8601 format. For example, June 7, 2017 is represented as 2017-06-07. The following sample Amazon Web Services CLI command uses the InvokedAfter filter.

aws ssm list-commands --filters key=InvokedAfter,value=2017-06-07T00:00:00Z

If the plugin hasn't started to run, the string is empty.

" }, "Status":{ "shape":"CommandInvocationStatus", @@ -7158,27 +7158,27 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution for an invocation. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Delayed: The system attempted to send the command to the target, but the target was not available. The instance might not be available because of network issues, because the instance was stopped, or for similar reasons. The system will try to send the command again.

  • Success: The command or plugin ran successfully. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: The command started to run on the instance, but the execution was not complete before the timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command wasn't run successfully on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the command execution for an invocation. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command hasn't been sent to the instance.

  • In Progress: The command has been sent to the instance but hasn't reached a terminal state.

  • Delayed: The system attempted to send the command to the target, but the target wasn't available. The instance might not be available because of network issues, because the instance was stopped, or for similar reasons. The system will try to send the command again.

  • Success: The command or plugin ran successfully. This is a terminal state.

  • Delivery Timed Out: The command wasn't delivered to the instance before the delivery timeout expired. Delivery timeouts don't count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: The command started to run on the instance, but the execution wasn't complete before the timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command wasn't run successfully on the instance. For a plugin, this indicates that the result code wasn't zero. For a command invocation, this indicates that the result code for one or more plugins wasn't zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "StandardOutputContent":{ "shape":"StandardOutputContent", - "documentation":"

The first 24,000 characters written by the plugin to stdout. If the command has not finished running, if ExecutionStatus is neither Succeeded nor Failed, then this string is empty.

" + "documentation":"

The first 24,000 characters written by the plugin to stdout. If the command hasn't finished running, if ExecutionStatus is neither Succeeded nor Failed, then this string is empty.

" }, "StandardOutputUrl":{ "shape":"Url", - "documentation":"

The URL for the complete text written by the plugin to stdout in Amazon Simple Storage Service (Amazon S3). If an S3 bucket was not specified, then this string is empty.

" + "documentation":"

The URL for the complete text written by the plugin to stdout in Amazon Simple Storage Service (Amazon S3). If an S3 bucket wasn't specified, then this string is empty.

" }, "StandardErrorContent":{ "shape":"StandardErrorContent", - "documentation":"

The first 8,000 characters written by the plugin to stderr. If the command has not finished running, then this string is empty.

" + "documentation":"

The first 8,000 characters written by the plugin to stderr. If the command hasn't finished running, then this string is empty.

" }, "StandardErrorUrl":{ "shape":"Url", - "documentation":"

The URL for the complete text written by the plugin to stderr. If the command has not finished running, then this string is empty.

" + "documentation":"

The URL for the complete text written by the plugin to stderr. If the command hasn't finished running, then this string is empty.

" }, "CloudWatchOutputConfig":{ "shape":"CloudWatchOutputConfig", - "documentation":"

CloudWatch Logs information where Systems Manager sent the command output.

" + "documentation":"

Amazon CloudWatch Logs information where Systems Manager sent the command output.

" } } }, @@ -7188,7 +7188,7 @@ "members":{ "Target":{ "shape":"SessionTarget", - "documentation":"

The ID of the instance.

" + "documentation":"

The instance ID.

" } } }, @@ -7240,7 +7240,7 @@ }, "SnapshotId":{ "shape":"SnapshotId", - "documentation":"

The user-defined snapshot ID.

" + "documentation":"

The snapshot ID provided by the user when running AWS-RunPatchBaseline.

" }, "BaselineOverride":{ "shape":"BaselineOverride", @@ -7253,7 +7253,7 @@ "members":{ "InstanceId":{ "shape":"InstanceId", - "documentation":"

The ID of the instance.

" + "documentation":"

The instance ID.

" }, "SnapshotId":{ "shape":"SnapshotId", @@ -7261,7 +7261,7 @@ }, "SnapshotDownloadUrl":{ "shape":"SnapshotDownloadUrl", - "documentation":"

A pre-signed Amazon S3 URL that can be used to download the patch snapshot.

" + "documentation":"

A pre-signed Amazon Simple Storage Service (Amazon S3) URL that can be used to download the patch snapshot.

" }, "Product":{ "shape":"Product", @@ -7275,7 +7275,7 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "VersionName":{ "shape":"DocumentVersionName", @@ -7296,19 +7296,19 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "CreatedDate":{ "shape":"DateTime", - "documentation":"

The date the Systems Manager document was created.

" + "documentation":"

The date the SSM document was created.

" }, "DisplayName":{ "shape":"DocumentDisplayName", - "documentation":"

The friendly name of the Systems Manager document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument.

" + "documentation":"

The friendly name of the SSM document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument.

" }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

The version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and cannot be changed.

" + "documentation":"

The version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -7316,15 +7316,15 @@ }, "Status":{ "shape":"DocumentStatus", - "documentation":"

The status of the Systems Manager document, such as Creating, Active, Updating, Failed, and Deleting.

" + "documentation":"

The status of the SSM document, such as Creating, Active, Updating, Failed, and Deleting.

" }, "StatusInformation":{ "shape":"DocumentStatusInformation", - "documentation":"

A message returned by AWS Systems Manager that explains the Status value. For example, a Failed status might be explained by the StatusInformation message, \"The specified S3 bucket does not exist. Verify that the URL of the S3 bucket is correct.\"

" + "documentation":"

A message returned by Amazon Web Services Systems Manager that explains the Status value. For example, a Failed status might be explained by the StatusInformation message, \"The specified S3 bucket doesn't exist. Verify that the URL of the S3 bucket is correct.\"

" }, "Content":{ "shape":"DocumentContent", - "documentation":"

The contents of the Systems Manager document.

" + "documentation":"

The contents of the SSM document.

" }, "DocumentType":{ "shape":"DocumentType", @@ -7459,7 +7459,7 @@ }, "StatusDetails":{ "shape":"MaintenanceWindowExecutionStatusDetails", - "documentation":"

The details explaining the Status. Only available for certain status values.

" + "documentation":"

The details explaining the status. Not available for all status values.

" }, "StartTime":{ "shape":"DateTime", @@ -7514,7 +7514,7 @@ }, "TaskType":{ "shape":"MaintenanceWindowTaskType", - "documentation":"

Retrieves the task type for a maintenance window. Task types include the following: LAMBDA, STEP_FUNCTIONS, AUTOMATION, RUN_COMMAND.

" + "documentation":"

Retrieves the task type for a maintenance window.

" }, "Parameters":{ "shape":"MaintenanceWindowExecutionTaskInvocationParameters", @@ -7538,7 +7538,7 @@ }, "OwnerInformation":{ "shape":"OwnerInformation", - "documentation":"

User-provided value to be included in any CloudWatch events raised while running tasks for these targets in this maintenance window.

" + "documentation":"

User-provided value to be included in any Amazon CloudWatch Events or Amazon EventBridge events raised while running tasks for these targets in this maintenance window.

" }, "WindowTargetId":{ "shape":"MaintenanceWindowTaskTargetId", @@ -7576,7 +7576,7 @@ }, "TaskArn":{ "shape":"MaintenanceWindowTaskArn", - "documentation":"

The ARN of the task that ran.

" + "documentation":"

The Amazon Resource Name (ARN) of the task that ran.

" }, "ServiceRole":{ "shape":"ServiceRole", @@ -7588,7 +7588,7 @@ }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParametersList", - "documentation":"

The parameters passed to the task when it was run.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

The map has the following format:

Key: string, between 1 and 255 characters

Value: an array of strings, each string is between 1 and 255 characters

" + "documentation":"

The parameters passed to the task when it was run.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

The map has the following format:

  • Key: string, between 1 and 255 characters

  • Value: an array of strings, each between 1 and 255 characters

" }, "Priority":{ "shape":"MaintenanceWindowTaskPriority", @@ -7608,7 +7608,7 @@ }, "StatusDetails":{ "shape":"MaintenanceWindowExecutionStatusDetails", - "documentation":"

The details explaining the Status. Only available for certain status values.

" + "documentation":"

The details explaining the status. Not available for all status values.

" }, "StartTime":{ "shape":"DateTime", @@ -7647,11 +7647,11 @@ }, "StartDate":{ "shape":"MaintenanceWindowStringDateTime", - "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. The maintenance window will not run before this specified time.

" + "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. The maintenance window won't run before this specified time.

" }, "EndDate":{ "shape":"MaintenanceWindowStringDateTime", - "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become inactive. The maintenance window will not run after this specified time.

" + "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become inactive. The maintenance window won't run after this specified time.

" }, "Schedule":{ "shape":"MaintenanceWindowSchedule", @@ -7663,7 +7663,7 @@ }, "ScheduleOffset":{ "shape":"MaintenanceWindowOffset", - "documentation":"

The number of days to wait to run a maintenance window after the scheduled CRON expression date and time.

", + "documentation":"

The number of days to wait to run a maintenance window after the scheduled cron expression date and time.

", "box":true }, "NextExecutionTime":{ @@ -7676,7 +7676,7 @@ }, "Cutoff":{ "shape":"MaintenanceWindowCutoff", - "documentation":"

The number of hours before the end of the maintenance window that Systems Manager stops scheduling new tasks for execution.

" + "documentation":"

The number of hours before the end of the maintenance window that Amazon Web Services Systems Manager stops scheduling new tasks for execution.

" }, "AllowUnassociatedTargets":{ "shape":"MaintenanceWindowAllowUnassociatedTargets", @@ -7730,11 +7730,11 @@ }, "TaskArn":{ "shape":"MaintenanceWindowTaskArn", - "documentation":"

The resource that the task used during execution. For RUN_COMMAND and AUTOMATION task types, the TaskArn is the Systems Manager Document name/ARN. For LAMBDA tasks, the value is the function name/ARN. For STEP_FUNCTIONS tasks, the value is the state machine ARN.

" + "documentation":"

The resource that the task used during execution. For RUN_COMMAND and AUTOMATION task types, the value of TaskArn is the SSM document name/ARN. For LAMBDA tasks, the value is the function name/ARN. For STEP_FUNCTIONS tasks, the value is the state machine ARN.

" }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -7754,15 +7754,15 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of targets allowed to run this task in parallel.

For maintenance window tasks without a target specified, you cannot supply a value for this option. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. This value does not affect the running of your task and can be ignored.

" + "documentation":"

The maximum number of targets allowed to run this task in parallel.

For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. This value doesn't affect the running of your task and can be ignored.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The maximum number of errors allowed before the task stops being scheduled.

For maintenance window tasks without a target specified, you cannot supply a value for this option. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. This value does not affect the running of your task and can be ignored.

" + "documentation":"

The maximum number of errors allowed before the task stops being scheduled.

For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. This value doesn't affect the running of your task and can be ignored.

" }, "LoggingInfo":{ "shape":"LoggingInfo", - "documentation":"

The location in Amazon S3 where the task results are logged.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

The location in Amazon Simple Storage Service (Amazon S3) where the task results are logged.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "Name":{ "shape":"MaintenanceWindowName", @@ -7843,15 +7843,15 @@ }, "Filters":{ "shape":"OpsFilterList", - "documentation":"

Optional filters used to scope down the returned OpsItems.

" + "documentation":"

Optional filters used to scope down the returned OpsData.

" }, "Aggregators":{ "shape":"OpsAggregatorList", - "documentation":"

Optional aggregators that return counts of OpsItems based on one or more expressions.

" + "documentation":"

Optional aggregators that return counts of OpsData based on one or more expressions.

" }, "ResultAttributes":{ "shape":"OpsResultAttributeList", - "documentation":"

The OpsItem data type to return.

" + "documentation":"

The OpsData data type to return.

" }, "NextToken":{ "shape":"NextToken", @@ -7869,7 +7869,7 @@ "members":{ "Entities":{ "shape":"OpsEntityList", - "documentation":"

The list of aggregated and filtered OpsItems.

" + "documentation":"

The list of aggregated details and filtered OpsData.

" }, "NextToken":{ "shape":"NextToken", @@ -7887,7 +7887,7 @@ }, "WithDecryption":{ "shape":"Boolean", - "documentation":"

Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.

", + "documentation":"

Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.

", "box":true }, "MaxResults":{ @@ -7920,11 +7920,11 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The name of the parameter you want to query.

" + "documentation":"

The name of the parameter you want to query.

To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

" }, "WithDecryption":{ "shape":"Boolean", - "documentation":"

Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.

", + "documentation":"

Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.

", "box":true } } @@ -7949,11 +7949,11 @@ "members":{ "Path":{ "shape":"PSParameterName", - "documentation":"

The hierarchy for the parameter. Hierarchies start with a forward slash (/). The hierachy is the parameter name except the last part of the parameter. For the API call to succeeed, the last part of the parameter name cannot be in the path. A parameter name hierarchy can have a maximum of 15 levels. Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33

" + "documentation":"

The hierarchy for the parameter. Hierarchies start with a forward slash (/). The hierarchy is the parameter name except the last part of the parameter. For the API call to succeed, the last part of the parameter name can't be in the path. A parameter name hierarchy can have a maximum of 15 levels. Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33

" }, "Recursive":{ "shape":"Boolean", - "documentation":"

Retrieve all parameters within a hierarchy.

If a user has access to a path, then the user can access all levels of that path. For example, if a user has permission to access path /a, then the user can also access /a/b. Even if a user has explicitly been denied access in IAM for parameter /a/b, they can still call the GetParametersByPath API action recursively for /a and view /a/b.

", + "documentation":"

Retrieve all parameters within a hierarchy.

If a user has access to a path, then the user can access all levels of that path. For example, if a user has permission to access path /a, then the user can also access /a/b. Even if a user has explicitly been denied access in IAM for parameter /a/b, they can still call the GetParametersByPath API operation recursively for /a and view /a/b.

", "box":true }, "ParameterFilters":{ @@ -7995,11 +7995,11 @@ "members":{ "Names":{ "shape":"ParameterNameList", - "documentation":"

Names of the parameters for which you want to query information.

" + "documentation":"

Names of the parameters for which you want to query information.

To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

" }, "WithDecryption":{ "shape":"Boolean", - "documentation":"

Return decrypted secure string value. Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.

", + "documentation":"

Return decrypted secure string value. Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.

", "box":true } } @@ -8013,7 +8013,7 @@ }, "InvalidParameters":{ "shape":"ParameterNameList", - "documentation":"

A list of parameters that are not formatted correctly or do not run during an execution.

" + "documentation":"

A list of parameters that aren't formatted correctly or don't run during an execution.

" } } }, @@ -8054,7 +8054,7 @@ "members":{ "BaselineId":{ "shape":"BaselineId", - "documentation":"

The ID of the patch baseline to retrieve.

" + "documentation":"

The ID of the patch baseline to retrieve.

To retrieve information about an Amazon Web Services managed patch baseline, specify the full Amazon Resource Name (ARN) of the baseline. For example, for the baseline AWS-AmazonLinuxDefaultPatchBaseline, specify arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0e392de35e7c563b7 instead of pb-0e392de35e7c563b7.

" } } }, @@ -8091,7 +8091,7 @@ }, "ApprovedPatchesEnableNonSecurity":{ "shape":"Boolean", - "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is false. Applies to Linux instances only.

", "box":true }, "RejectedPatches":{ @@ -8100,7 +8100,7 @@ }, "RejectedPatchesAction":{ "shape":"PatchAction", - "documentation":"

The action specified to take on patches included in the RejectedPatches list. A patch can be allowed only if it is a dependency of another package, or blocked entirely along with packages that include it as a dependency.

" + "documentation":"

The action specified to take on patches included in the RejectedPatches list. A patch can be allowed only if it is a dependency of another package, or blocked entirely along with packages that include it as a dependency.

" }, "PatchGroups":{ "shape":"PatchGroupList", @@ -8133,7 +8133,7 @@ "documentation":"

The ID of the service setting to get. The setting ID can be one of the following.

  • /ssm/automation/customer-script-log-destination

  • /ssm/automation/customer-script-log-group-name

  • /ssm/documents/console/public-sharing-permission

  • /ssm/parameter-store/default-parameter-tier

  • /ssm/parameter-store/high-throughput-enabled

  • /ssm/managed-instance/activation-tier

" } }, - "documentation":"

The request body of the GetServiceSetting API action.

" + "documentation":"

The request body of the GetServiceSetting API operation.

" }, "GetServiceSettingResult":{ "type":"structure", @@ -8143,17 +8143,17 @@ "documentation":"

The query result of the current service setting.

" } }, - "documentation":"

The query result body of the GetServiceSetting API action.

" + "documentation":"

The query result body of the GetServiceSetting API operation.

" }, "HierarchyLevelLimitExceededException":{ "type":"structure", "members":{ "message":{ "shape":"String", - "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and constraints for parameter names in the AWS Systems Manager User Guide.

" + "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and constraints for parameter names in the Amazon Web Services Systems Manager User Guide.

" } }, - "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and constraints for parameter names in the AWS Systems Manager User Guide.

", + "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and constraints for parameter names in the Amazon Web Services Systems Manager User Guide.

", "exception":true }, "HierarchyTypeMismatchException":{ @@ -8161,10 +8161,10 @@ "members":{ "message":{ "shape":"String", - "documentation":"

Parameter Store does not support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

" + "documentation":"

Parameter Store doesn't support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

" } }, - "documentation":"

Parameter Store does not support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

", + "documentation":"

Parameter Store doesn't support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

", "exception":true }, "IPAddress":{ @@ -8366,7 +8366,7 @@ }, "IsLatestVersion":{ "shape":"Boolean", - "documentation":"

Indicates whether the latest version of SSM Agent is running on your Linux Managed Instance. This field does not indicate whether or not the latest version is installed on Windows managed instances, because some older versions of Windows Server use the EC2Config service to process SSM requests.

", + "documentation":"

Indicates whether the latest version of SSM Agent is running on your Linux Managed Instance. This field doesn't indicate whether or not the latest version is installed on Windows managed instances, because some older versions of Windows Server use the EC2Config service to process Systems Manager requests.

", "box":true }, "PlatformType":{ @@ -8383,15 +8383,15 @@ }, "ActivationId":{ "shape":"ActivationId", - "documentation":"

The activation ID created by Systems Manager when the server or VM was registered.

" + "documentation":"

The activation ID created by Amazon Web Services Systems Manager when the server or virtual machine (VM) was registered.

" }, "IamRole":{ "shape":"IamRole", - "documentation":"

The Amazon Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed instance. This call does not return the IAM role for EC2 instances. To retrieve the IAM role for an EC2 instance, use the Amazon EC2 DescribeInstances action. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the AWS CLI Command Reference.

" + "documentation":"

The Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed instance. This call doesn't return the IAM role for Amazon Elastic Compute Cloud (Amazon EC2) instances. To retrieve the IAM role for an EC2 instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.

" }, "RegistrationDate":{ "shape":"DateTime", - "documentation":"

The date the server or VM was registered with AWS as a managed instance.

", + "documentation":"

The date the server or VM was registered with Amazon Web Services as a managed instance.

", "box":true }, "ResourceType":{ @@ -8400,7 +8400,7 @@ }, "Name":{ "shape":"String", - "documentation":"

The name assigned to an on-premises server or virtual machine (VM) when it is activated as a Systems Manager managed instance. The name is specified as the DefaultInstanceName property using the CreateActivation command. It is applied to the managed instance by specifying the Activation Code and Activation ID when you install SSM Agent on the instance, as explained in Install SSM Agent for a hybrid environment (Linux) and Install SSM Agent for a hybrid environment (Windows). To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances action. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the AWS CLI Command Reference.

" + "documentation":"

The name assigned to an on-premises server or virtual machine (VM) when it is activated as a Systems Manager managed instance. The name is specified as the DefaultInstanceName property using the CreateActivation command. It is applied to the managed instance by specifying the Activation Code and Activation ID when you install SSM Agent on the instance, as explained in Install SSM Agent for a hybrid environment (Linux) and Install SSM Agent for a hybrid environment (Windows). To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.

" }, "IPAddress":{ "shape":"IPAddress", @@ -8445,7 +8445,7 @@ "documentation":"

The filter values.

" } }, - "documentation":"

Describes a filter for a specific list of instances. You can filter instances information by using tags. You specify tags by using a key-value mapping.

Use this action instead of the DescribeInstanceInformationRequest$InstanceInformationFilterList method. The InstanceInformationFilterList method is a legacy method and does not support tags.

" + "documentation":"

Describes a filter for a specific list of instances. You can filter instances information by using tags. You specify tags by using a key-value mapping.

Use this operation instead of the DescribeInstanceInformationRequest$InstanceInformationFilterList method. The InstanceInformationFilterList method is a legacy method and doesn't support tags.

" }, "InstanceInformationFilterKey":{ "type":"string", @@ -8488,7 +8488,7 @@ "members":{ "Key":{ "shape":"InstanceInformationStringFilterKey", - "documentation":"

The filter key name to describe your instances. For example:

\"InstanceIds\"|\"AgentVersion\"|\"PingStatus\"|\"PlatformTypes\"|\"ActivationIds\"|\"IamRole\"|\"ResourceType\"|\"AssociationStatus\"|\"Tag Key\"

Tag key is not a valid filter. You must specify either tag-key or tag:keyname and a string. Here are some valid examples: tag-key, tag:123, tag:al!, tag:Windows. Here are some invalid examples: tag-keys, Tag Key, tag:, tagKey, abc:keyname.

" + "documentation":"

The filter key name to describe your instances. For example:

\"InstanceIds\"|\"AgentVersion\"|\"PingStatus\"|\"PlatformTypes\"|\"ActivationIds\"|\"IamRole\"|\"ResourceType\"|\"AssociationStatus\"|\"Tag Key\"

Tag key isn't a valid filter. You must specify either tag-key or tag:keyname and a string. Here are some valid examples: tag-key, tag:123, tag:al!, tag:Windows. Here are some invalid examples: tag-keys, Tag Key, tag:, tagKey, abc:keyname.

" }, "Values":{ "shape":"InstanceInformationFilterValueSet", @@ -8535,7 +8535,7 @@ }, "InstallOverrideList":{ "shape":"InstallOverrideList", - "documentation":"

An https URL or an Amazon S3 path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML format and specify in the SSM document AWS-RunPatchBaseline, overrides the patches specified by the default patch baseline.

For more information about the InstallOverrideList parameter, see About the SSM document AWS-RunPatchBaseline in the AWS Systems Manager User Guide.

" + "documentation":"

An https URL or an Amazon Simple Storage Service (Amazon S3) path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML format and specify in the SSM document AWS-RunPatchBaseline, overrides the patches specified by the default patch baseline.

For more information about the InstallOverrideList parameter, see About the AWS-RunPatchBaseline SSM document in the Amazon Web Services Systems Manager User Guide.

" }, "OwnerInformation":{ "shape":"OwnerInformation", @@ -8556,7 +8556,7 @@ }, "InstalledRejectedCount":{ "shape":"PatchInstalledRejectedCount", - "documentation":"

The number of patches installed on an instance that are specified in a RejectedPatches list. Patches with a status of InstalledRejected were typically installed before they were added to a RejectedPatches list.

If ALLOW_AS_DEPENDENCY is the specified option for RejectedPatchesAction, the value of InstalledRejectedCount will always be 0 (zero).

", + "documentation":"

The number of patches installed on an instance that are specified in a RejectedPatches list. Patches with a status of InstalledRejected were typically installed before they were added to a RejectedPatches list.

If ALLOW_AS_DEPENDENCY is the specified option for RejectedPatchesAction, the value of InstalledRejectedCount will always be 0 (zero).

", "box":true }, "MissingCount":{ @@ -8569,7 +8569,7 @@ }, "UnreportedNotApplicableCount":{ "shape":"PatchUnreportedNotApplicableCount", - "documentation":"

The number of patches beyond the supported limit of NotApplicableCount that are not reported by name to Systems Manager Inventory.

", + "documentation":"

The number of patches beyond the supported limit of NotApplicableCount that aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.

", "box":true }, "NotApplicableCount":{ @@ -8586,7 +8586,7 @@ }, "Operation":{ "shape":"PatchOperationType", - "documentation":"

The type of patching operation that was performed: SCAN (assess patch compliance state) or INSTALL (install missing patches).

" + "documentation":"

The type of patching operation that was performed:

  • SCAN assesses the patch compliance state.

  • INSTALL installs missing patches.

" }, "LastNoRebootInstallOperationTime":{ "shape":"DateTime", @@ -8594,21 +8594,21 @@ }, "RebootOption":{ "shape":"RebootOption", - "documentation":"

Indicates the reboot option specified in the patch baseline.

Reboot options apply to Install operations only. Reboots are not attempted for Patch Manager Scan operations.

  • RebootIfNeeded: Patch Manager tries to reboot the instance if it installed any patches, or if any patches are detected with a status of InstalledPendingReboot.

  • NoReboot: Patch Manager attempts to install missing packages without trying to reboot the system. Patches installed with this option are assigned a status of InstalledPendingReboot. These patches might not be in effect until a reboot is performed.

" + "documentation":"

Indicates the reboot option specified in the patch baseline.

Reboot options apply to Install operations only. Reboots aren't attempted for Patch Manager Scan operations.

  • RebootIfNeeded: Patch Manager tries to reboot the instance if it installed any patches, or if any patches are detected with a status of InstalledPendingReboot.

  • NoReboot: Patch Manager attempts to install missing packages without trying to reboot the system. Patches installed with this option are assigned a status of InstalledPendingReboot. These patches might not be in effect until a reboot is performed.

" }, "CriticalNonCompliantCount":{ "shape":"PatchCriticalNonCompliantCount", - "documentation":"

The number of instances where patches that are specified as \"Critical\" for compliance reporting in the patch baseline are not installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required instance reboot. The status of these instances is NON_COMPLIANT.

", + "documentation":"

The number of instances where patches that are specified as Critical for compliance reporting in the patch baseline aren't installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required instance reboot. The status of these instances is NON_COMPLIANT.

", "box":true }, "SecurityNonCompliantCount":{ "shape":"PatchSecurityNonCompliantCount", - "documentation":"

The number of instances where patches that are specified as \"Security\" in a patch advisory are not installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required instance reboot. The status of these instances is NON_COMPLIANT.

", + "documentation":"

The number of instances where patches that are specified as Security in a patch advisory aren't installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required instance reboot. The status of these instances is NON_COMPLIANT.

", "box":true }, "OtherNonCompliantCount":{ "shape":"PatchOtherNonCompliantCount", - "documentation":"

The number of instances with patches installed that are specified as other than \"Critical\" or \"Security\" but are not compliant with the patch baseline. The status of these instances is NON_COMPLIANT.

", + "documentation":"

The number of instances with patches installed that are specified as other than Critical or Security but aren't compliant with the patch baseline. The status of these instances is NON_COMPLIANT.

", "box":true } }, @@ -8624,18 +8624,18 @@ "members":{ "Key":{ "shape":"InstancePatchStateFilterKey", - "documentation":"

The key for the filter. Supported values are FailedCount, InstalledCount, InstalledOtherCount, MissingCount and NotApplicableCount.

" + "documentation":"

The key for the filter. Supported values include the following:

  • InstalledCount

  • InstalledOtherCount

  • InstalledPendingRebootCount

  • InstalledRejectedCount

  • MissingCount

  • FailedCount

  • UnreportedNotApplicableCount

  • NotApplicableCount

" }, "Values":{ "shape":"InstancePatchStateFilterValues", - "documentation":"

The value for the filter, must be an integer greater than or equal to 0.

" + "documentation":"

The value for the filter. Must be an integer greater than or equal to 0.

" }, "Type":{ "shape":"InstancePatchStateOperatorType", - "documentation":"

The type of comparison that should be performed for the value: Equal, NotEqual, LessThan or GreaterThan.

" + "documentation":"

The type of comparison that should be performed for the value.

" } }, - "documentation":"

Defines a filter used in DescribeInstancePatchStatesForPatchGroup used to scope down the information returned by the API.

" + "documentation":"

Defines a filter used in DescribeInstancePatchStatesForPatchGroup to scope down the information returned by the API.

Example: To filter for all instances in a patch group having more than three patches with a FailedCount status, use the following for the filter:

  • Value for Key: FailedCount

  • Value for Type: GreaterThan

  • Value for Values: 3

" }, "InstancePatchStateFilterKey":{ "type":"string", @@ -8694,7 +8694,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The activation is not valid. The activation might have been deleted, or the ActivationId and the ActivationCode do not match.

", + "documentation":"

The activation isn't valid. The activation might have been deleted, or the ActivationId and the ActivationCode don't match.

", "exception":true }, "InvalidActivationId":{ @@ -8702,7 +8702,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The activation ID is not valid. Verify the you entered the correct ActivationId or ActivationCode and try again.

", + "documentation":"

The activation ID isn't valid. Verify that you entered the correct ActivationId or ActivationCode and try again.

", "exception":true }, "InvalidAggregatorException":{ @@ -8710,7 +8710,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified aggregator is not valid for inventory groups. Verify that the aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation.

", + "documentation":"

The specified aggregator isn't valid for inventory groups. Verify that the aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation.

", "exception":true }, "InvalidAllowedPatternException":{ @@ -8718,10 +8718,10 @@ "members":{ "message":{ "shape":"String", - "documentation":"

The request does not meet the regular expression requirement.

" + "documentation":"

The request doesn't meet the regular expression requirement.

" } }, - "documentation":"

The request does not meet the regular expression requirement.

", + "documentation":"

The request doesn't meet the regular expression requirement.

", "exception":true }, "InvalidAssociation":{ @@ -8729,7 +8729,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The association is not valid or does not exist.

", + "documentation":"

The association isn't valid or doesn't exist.

", "exception":true }, "InvalidAssociationVersion":{ @@ -8737,7 +8737,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The version you specified is not valid. Use ListAssociationVersions to view all versions of an association according to the association ID. Or, use the $LATEST parameter to view the latest version of the association.

", + "documentation":"

The version you specified isn't valid. Use ListAssociationVersions to view all versions of an association according to the association ID. Or, use the $LATEST parameter to view the latest version of the association.

", "exception":true }, "InvalidAutomationExecutionParametersException":{ @@ -8745,7 +8745,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The supplied parameters for invoking the specified Automation document are incorrect. For example, they may not match the set of parameters permitted for the specified Automation document.

", + "documentation":"

The supplied parameters for invoking the specified Automation runbook are incorrect. For example, they may not match the set of parameters permitted for the specified Automation document.

", "exception":true }, "InvalidAutomationSignalException":{ @@ -8753,7 +8753,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The signal is not valid for the current Automation execution.

", + "documentation":"

The signal isn't valid for the current Automation execution.

", "exception":true }, "InvalidAutomationStatusUpdateException":{ @@ -8761,14 +8761,14 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified update status operation is not valid.

", + "documentation":"

The specified update status operation isn't valid.

", "exception":true }, "InvalidCommandId":{ "type":"structure", "members":{ }, - "documentation":"

The specified command ID is not valid. Verify the ID and try again.

", + "documentation":"

The specified command ID isn't valid. Verify the ID and try again.

", "exception":true }, "InvalidDeleteInventoryParametersException":{ @@ -8776,7 +8776,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

One or more of the parameters specified for the delete operation is not valid. Verify all parameters and try again.

", + "documentation":"

One or more of the parameters specified for the delete operation isn't valid. Verify all parameters and try again.

", "exception":true }, "InvalidDeletionIdException":{ @@ -8784,7 +8784,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The ID specified for the delete operation does not exist or is not valid. Verify the ID and try again.

", + "documentation":"

The ID specified for the delete operation doesn't exist or isn't valid. Verify the ID and try again.

", "exception":true }, "InvalidDocument":{ @@ -8792,10 +8792,10 @@ "members":{ "Message":{ "shape":"String", - "documentation":"

The document does not exist or the document is not available to the user. This exception can be issued by CreateAssociation, CreateAssociationBatch, DeleteAssociation, DeleteDocument, DescribeAssociation, DescribeDocument, GetDocument, SendCommand, or UpdateAssociationStatus.

" + "documentation":"

The SSM document doesn't exist or the document isn't available to the user. This exception can be issued by various API operations.

" } }, - "documentation":"

The specified document does not exist.

", + "documentation":"

The specified SSM document doesn't exist.

", "exception":true }, "InvalidDocumentContent":{ @@ -8806,7 +8806,7 @@ "documentation":"

A description of the validation error.

" } }, - "documentation":"

The content for the document is not valid.

", + "documentation":"

The content for the document isn't valid.

", "exception":true }, "InvalidDocumentOperation":{ @@ -8822,7 +8822,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The version of the document schema is not supported.

", + "documentation":"

The version of the document schema isn't supported.

", "exception":true }, "InvalidDocumentType":{ @@ -8830,7 +8830,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The document type is not valid. Valid document types are described in the DocumentType property.

", + "documentation":"

The SSM document type isn't valid. Valid document types are described in the DocumentType property.

", "exception":true }, "InvalidDocumentVersion":{ @@ -8838,7 +8838,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The document version is not valid or does not exist.

", + "documentation":"

The document version isn't valid or doesn't exist.

", "exception":true }, "InvalidFilter":{ @@ -8846,14 +8846,14 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The filter name is not valid. Verify the you entered the correct name and try again.

", + "documentation":"

The filter name isn't valid. Verify that you entered the correct name and try again.

", "exception":true }, "InvalidFilterKey":{ "type":"structure", "members":{ }, - "documentation":"

The specified key is not valid.

", + "documentation":"

The specified key isn't valid.

", "exception":true }, "InvalidFilterOption":{ @@ -8861,10 +8861,10 @@ "members":{ "message":{ "shape":"String", - "documentation":"

The specified filter option is not valid. Valid options are Equals and BeginsWith. For Path filter, valid options are Recursive and OneLevel.

" + "documentation":"

The specified filter option isn't valid. Valid options are Equals and BeginsWith. For Path filter, valid options are Recursive and OneLevel.

" } }, - "documentation":"

The specified filter option is not valid. Valid options are Equals and BeginsWith. For Path filter, valid options are Recursive and OneLevel.

", + "documentation":"

The specified filter option isn't valid. Valid options are Equals and BeginsWith. For Path filter, valid options are Recursive and OneLevel.

", "exception":true }, "InvalidFilterValue":{ @@ -8872,7 +8872,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The filter value is not valid. Verify the value and try again.

", + "documentation":"

The filter value isn't valid. Verify the value and try again.

", "exception":true }, "InvalidInstanceId":{ @@ -8880,7 +8880,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. Verify that SSM Agent is running.

SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

", + "documentation":"

The following problems can cause this exception:

  • You don't have permission to access the instance.

  • Amazon Web Services Systems Manager Agent (SSM Agent) isn't running. Verify that SSM Agent is running.

  • SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM Agent.

  • The instance isn't in valid state. Valid states are: Running, Pending, Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.

", "exception":true }, "InvalidInstanceInformationFilterValue":{ @@ -8888,7 +8888,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The specified filter value is not valid.

", + "documentation":"

The specified filter value isn't valid.

", "exception":true }, "InvalidInventoryGroupException":{ @@ -8896,7 +8896,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified inventory group is not valid.

", + "documentation":"

The specified inventory group isn't valid.

", "exception":true }, "InvalidInventoryItemContextException":{ @@ -8912,7 +8912,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The request is not valid.

", + "documentation":"

The request isn't valid.

", "exception":true }, "InvalidItemContentException":{ @@ -8921,7 +8921,7 @@ "TypeName":{"shape":"InventoryItemTypeName"}, "Message":{"shape":"String"} }, - "documentation":"

One or more content items is not valid.

", + "documentation":"

One or more content items aren't valid.

", "exception":true }, "InvalidKeyId":{ @@ -8929,7 +8929,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The query key ID is not valid.

", + "documentation":"

The query key ID isn't valid.

", "exception":true }, "InvalidNextToken":{ @@ -8937,7 +8937,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified token is not valid.

", + "documentation":"

The specified token isn't valid.

", "exception":true }, "InvalidNotificationConfig":{ @@ -8945,7 +8945,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

One or more configuration items is not valid. Verify that a valid Amazon Resource Name (ARN) was provided for an Amazon SNS topic.

", + "documentation":"

One or more configuration items aren't valid. Verify that a valid Amazon Resource Name (ARN) was provided for an Amazon Simple Notification Service topic.

", "exception":true }, "InvalidOptionException":{ @@ -8953,21 +8953,21 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The delete inventory option specified is not valid. Verify the option and try again.

", + "documentation":"

The delete inventory option specified isn't valid. Verify the option and try again.

", "exception":true }, "InvalidOutputFolder":{ "type":"structure", "members":{ }, - "documentation":"

The S3 bucket does not exist.

", + "documentation":"

The S3 bucket doesn't exist.

", "exception":true }, "InvalidOutputLocation":{ "type":"structure", "members":{ }, - "documentation":"

The output location is not valid or does not exist.

", + "documentation":"

The output location isn't valid or doesn't exist.

", "exception":true }, "InvalidParameters":{ @@ -8975,7 +8975,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

You must specify values for all required parameters in the Systems Manager document. You can only supply values to parameters defined in the Systems Manager document.

", + "documentation":"

You must specify values for all required parameters in the Amazon Web Services Systems Manager document (SSM document). You can only supply values to parameters defined in the SSM document.

", "exception":true }, "InvalidPermissionType":{ @@ -8983,14 +8983,14 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The permission type is not supported. Share is the only supported permission type.

", + "documentation":"

The permission type isn't supported. Share is the only supported permission type.

", "exception":true }, "InvalidPluginName":{ "type":"structure", "members":{ }, - "documentation":"

The plugin name is not valid.

", + "documentation":"

The plugin name isn't valid.

", "exception":true }, "InvalidPolicyAttributeException":{ @@ -9006,21 +9006,21 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The policy type is not supported. Parameter Store supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification.

", + "documentation":"

The policy type isn't supported. Parameter Store supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification.

", "exception":true }, "InvalidResourceId":{ "type":"structure", "members":{ }, - "documentation":"

The resource ID is not valid. Verify that you entered the correct ID and try again.

", + "documentation":"

The resource ID isn't valid. Verify that you entered the correct ID and try again.

", "exception":true }, "InvalidResourceType":{ "type":"structure", "members":{ }, - "documentation":"

The resource type is not valid. For example, if you are attempting to tag an instance, the instance must be a registered, managed instance.

", + "documentation":"

The resource type isn't valid. For example, if you are attempting to tag an instance, the instance must be a registered, managed instance.

", "exception":true }, "InvalidResultAttributeException":{ @@ -9028,7 +9028,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified inventory item result attribute is not valid.

", + "documentation":"

The specified inventory item result attribute isn't valid.

", "exception":true }, "InvalidRole":{ @@ -9036,7 +9036,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The role name can't contain invalid characters. Also verify that you specified an IAM role for notifications that includes the required trust policy. For information about configuring the IAM role for Run Command notifications, see Configuring Amazon SNS Notifications for Run Command in the AWS Systems Manager User Guide.

", + "documentation":"

The role name can't contain invalid characters. Also verify that you specified an IAM role for notifications that includes the required trust policy. For information about configuring the IAM role for Run Command notifications, see Configuring Amazon SNS Notifications for Run Command in the Amazon Web Services Systems Manager User Guide.

", "exception":true }, "InvalidSchedule":{ @@ -9052,7 +9052,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The target is not valid or does not exist. It might not be configured for Systems Manager or you might not have permission to perform the operation.

", + "documentation":"

The target isn't valid or doesn't exist. It might not be configured for Systems Manager or you might not have permission to perform the operation.

", "exception":true }, "InvalidTypeNameException":{ @@ -9060,7 +9060,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The parameter type name is not valid.

", + "documentation":"

The parameter type name isn't valid.

", "exception":true }, "InvalidUpdate":{ @@ -9068,7 +9068,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The update is not valid.

", + "documentation":"

The update isn't valid.

", "exception":true }, "InventoryAggregator":{ @@ -9122,7 +9122,7 @@ "members":{ "DeletionId":{ "shape":"UUID", - "documentation":"

The deletion ID returned by the DeleteInventory action.

" + "documentation":"

The deletion ID returned by the DeleteInventory operation.

" }, "TypeName":{ "shape":"InventoryItemTypeName", @@ -9142,21 +9142,21 @@ }, "DeletionSummary":{ "shape":"InventoryDeletionSummary", - "documentation":"

Information about the delete operation. For more information about this summary, see Understanding the delete inventory summary in the AWS Systems Manager User Guide.

" + "documentation":"

Information about the delete operation. For more information about this summary, see Understanding the delete inventory summary in the Amazon Web Services Systems Manager User Guide.

" }, "LastStatusUpdateTime":{ "shape":"InventoryDeletionLastStatusUpdateTime", "documentation":"

The UTC timestamp of the last status report.

" } }, - "documentation":"

Status information returned by the DeleteInventory action.

" + "documentation":"

Status information returned by the DeleteInventory operation.

" }, "InventoryDeletionSummary":{ "type":"structure", "members":{ "TotalCount":{ "shape":"TotalCount", - "documentation":"

The total number of items to delete. This count does not change during the delete operation.

" + "documentation":"

The total number of items to delete. This count doesn't change during the delete operation.

" }, "RemainingCount":{ "shape":"RemainingCount", @@ -9208,11 +9208,11 @@ }, "Values":{ "shape":"InventoryFilterValueList", - "documentation":"

Inventory filter values. Example: inventory filter where instance IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal

" + "documentation":"

Inventory filter values. Example: inventory filter where instance IDs are specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, i-1a2b3c4d5e6,Type=Equal.

" }, "Type":{ "shape":"InventoryQueryOperatorType", - "documentation":"

The type of filter.

The Exists filter must be used with aggregators. For more information, see Aggregating inventory data in the AWS Systems Manager User Guide.

" + "documentation":"

The type of filter.

The Exists filter must be used with aggregators. For more information, see Aggregating inventory data in the Amazon Web Services Systems Manager User Guide.

" } }, "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" @@ -9274,7 +9274,7 @@ "members":{ "TypeName":{ "shape":"InventoryItemTypeName", - "documentation":"

The name of the inventory type. Default inventory item type names start with AWS. Custom inventory type names will start with Custom. Default inventory item types include the following: AWS:AWSComponent, AWS:Application, AWS:InstanceInformation, AWS:Network, and AWS:WindowsUpdate.

" + "documentation":"

The name of the inventory type. Default inventory item type names start with AWS. Custom inventory type names will start with Custom. Default inventory item types include the following: AWS:AWSComponent, AWS:Application, AWS:InstanceInformation, AWS:Network, and AWS:WindowsUpdate.

" }, "SchemaVersion":{ "shape":"InventoryItemSchemaVersion", @@ -9286,7 +9286,7 @@ }, "ContentHash":{ "shape":"InventoryItemContentHash", - "documentation":"

MD5 hash of the inventory item type contents. The content hash is used to determine whether to update inventory information. The PutInventory API does not update the inventory item type contents if the MD5 hash has not changed since last update.

" + "documentation":"

MD5 hash of the inventory item type contents. The content hash is used to determine whether to update inventory information. The PutInventory API doesn't update the inventory item type contents if the MD5 hash hasn't changed since last update.

" }, "Content":{ "shape":"InventoryItemEntryList", @@ -9367,7 +9367,7 @@ "members":{ "TypeName":{ "shape":"InventoryItemTypeName", - "documentation":"

The name of the inventory type. Default inventory item type names start with AWS. Custom inventory type names will start with Custom. Default inventory item types include the following: AWS:AWSComponent, AWS:Application, AWS:InstanceInformation, AWS:Network, and AWS:WindowsUpdate.

" + "documentation":"

The name of the inventory type. Default inventory item type names start with Amazon Web Services. Custom inventory type names will start with Custom. Default inventory item types include the following: AWS:AWSComponent, AWS:Application, AWS:InstanceInformation, AWS:Network, and AWS:WindowsUpdate.

" }, "Version":{ "shape":"InventoryItemSchemaVersion", @@ -9455,7 +9455,7 @@ }, "ContentHash":{ "shape":"InventoryItemContentHash", - "documentation":"

MD5 hash of the inventory item type contents. The content hash is used to determine whether to update inventory information. The PutInventory API does not update the inventory item type contents if the MD5 hash has not changed since last update.

" + "documentation":"

MD5 hash of the inventory item type contents. The content hash is used to determine whether to update inventory information. The PutInventory API doesn't update the inventory item type contents if the MD5 hash hasn't changed since last update.

" }, "Content":{ "shape":"InventoryItemEntryList", @@ -9482,7 +9482,7 @@ "type":"structure", "members":{ }, - "documentation":"

The command ID and instance ID you specified did not match any invocations. Verify the command ID and the instance ID and try again.

", + "documentation":"

The command ID and instance ID you specified didn't match any invocations. Verify the command ID and the instance ID and try again.

", "exception":true }, "InvocationTraceOutput":{ @@ -9539,7 +9539,7 @@ "members":{ "InvalidLabels":{ "shape":"ParameterLabelList", - "documentation":"

The label does not meet the requirements. For information about parameter label requirements, see Labeling parameters in the AWS Systems Manager User Guide.

" + "documentation":"

The label doesn't meet the requirements. For information about parameter label requirements, see Labeling parameters in the Amazon Web Services Systems Manager User Guide.

" }, "ParameterVersion":{ "shape":"PSParameterVersion", @@ -9595,7 +9595,7 @@ "members":{ "AssociationFilterList":{ "shape":"AssociationFilterList", - "documentation":"

One or more filters. Use a filter to return a more specific list of results.

Filtering associations using the InstanceID attribute only returns legacy associations created using the InstanceID attribute. Associations targeting the instance that are part of the Target Attributes ResourceGroup or Tags are not returned.

" + "documentation":"

One or more filters. Use a filter to return a more specific list of results.

Filtering associations using the InstanceID attribute only returns legacy associations created using the InstanceID attribute. Associations targeting the instance that are part of the Target Attributes ResourceGroup or Tags aren't returned.

" }, "MaxResults":{ "shape":"MaxResults", @@ -9647,7 +9647,7 @@ }, "Details":{ "shape":"Boolean", - "documentation":"

(Optional) If set this returns the response of the command executions and any command output. The default value is 'false'.

" + "documentation":"

(Optional) If set this returns the response of the command executions and any command output. The default value is false.

" } } }, @@ -9673,7 +9673,7 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

(Optional) Lists commands issued against this instance ID.

You can't specify an instance ID in the same command that you specify Status = Pending. This is because the command has not reached the instance yet.

" + "documentation":"

(Optional) Lists commands issued against this instance ID.

You can't specify an instance ID in the same command that you specify Status = Pending. This is because the command hasn't reached the instance yet.

" }, "MaxResults":{ "shape":"CommandMaxResults", @@ -9782,11 +9782,11 @@ "members":{ "Name":{ "shape":"DocumentName", - "documentation":"

The name of the document.

" + "documentation":"

The name of the change template.

" }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The version of the document.

" + "documentation":"

The version of the change template.

" }, "Metadata":{ "shape":"DocumentMetadataEnum", @@ -9808,19 +9808,19 @@ "members":{ "Name":{ "shape":"DocumentName", - "documentation":"

The name of the document.

" + "documentation":"

The name of the change template.

" }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The version of the document.

" + "documentation":"

The version of the change template.

" }, "Author":{ "shape":"DocumentAuthor", - "documentation":"

The user ID of the person in the organization who requested the document review.

" + "documentation":"

The user ID of the person in the organization who requested the review of the change template.

" }, "Metadata":{ "shape":"DocumentMetadataResponseInfo", - "documentation":"

Information about the response to the document approval request.

" + "documentation":"

Information about the response to the change template approval request.

" }, "NextToken":{ "shape":"NextToken", @@ -9869,7 +9869,7 @@ }, "Filters":{ "shape":"DocumentKeyValuesFilterList", - "documentation":"

One or more DocumentKeyValuesFilter objects. Use a filter to return a more specific list of results. For keys, you can specify one or more key-value pair tags that have been applied to a document. Other valid keys include Owner, Name, PlatformTypes, DocumentType, and TargetType. For example, to return documents you own use Key=Owner,Values=Self. To specify a custom key-value pair, use the format Key=tag:tagName,Values=valueName.

This API action only supports filtering documents by using a single tag key and one or more tag values. For example: Key=tag:tagName,Values=valueName1,valueName2

" + "documentation":"

One or more DocumentKeyValuesFilter objects. Use a filter to return a more specific list of results. For keys, you can specify one or more key-value pair tags that have been applied to a document. Other valid keys include Owner, Name, PlatformTypes, DocumentType, and TargetType. For example, to return documents you own use Key=Owner,Values=Self. To specify a custom key-value pair, use the format Key=tag:tagName,Values=valueName.

This API operation only supports filtering documents by using a single tag key and one or more tag values. For example: Key=tag:tagName,Values=valueName1,valueName2

" }, "MaxResults":{ "shape":"MaxResults", @@ -9887,7 +9887,7 @@ "members":{ "DocumentIdentifiers":{ "shape":"DocumentIdentifierList", - "documentation":"

The names of the Systems Manager documents.

" + "documentation":"

The names of the SSM documents.

" }, "NextToken":{ "shape":"NextToken", @@ -10090,7 +10090,7 @@ "members":{ "SyncType":{ "shape":"ResourceDataSyncType", - "documentation":"

View a list of resource data syncs according to the sync type. Specify SyncToDestination to view resource data syncs that synchronize data to an Amazon S3 bucket. Specify SyncFromSource to view resource data syncs from AWS Organizations or from multiple AWS Regions.

" + "documentation":"

View a list of resource data syncs according to the sync type. Specify SyncToDestination to view resource data syncs that synchronize data to an Amazon S3 bucket. Specify SyncFromSource to view resource data syncs from Organizations or from multiple Amazon Web Services Regions.

" }, "NextToken":{ "shape":"NextToken", @@ -10108,7 +10108,7 @@ "members":{ "ResourceDataSyncItems":{ "shape":"ResourceDataSyncItemList", - "documentation":"

A list of your current Resource Data Sync configurations and their statuses.

" + "documentation":"

A list of your current resource data sync configurations and their statuses.

" }, "NextToken":{ "shape":"NextToken", @@ -10159,10 +10159,10 @@ }, "S3Region":{ "shape":"S3Region", - "documentation":"

The Region where the S3 bucket is located.

" + "documentation":"

The Amazon Web Services Region where the S3 bucket is located.

" } }, - "documentation":"

Information about an S3 bucket to write instance-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

Information about an Amazon Simple Storage Service (Amazon S3) bucket to write instance-level logs to.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "Long":{"type":"long"}, "MaintenanceWindowAllowUnassociatedTargets":{"type":"boolean"}, @@ -10171,14 +10171,14 @@ "members":{ "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The version of an Automation document to use during task execution.

" + "documentation":"

The version of an Automation runbook to use during task execution.

" }, "Parameters":{ "shape":"AutomationParameterMap", - "documentation":"

The parameters for the AUTOMATION task.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For AUTOMATION task types, Systems Manager ignores any values specified for these parameters.

" + "documentation":"

The parameters for the AUTOMATION task.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For AUTOMATION task types, Amazon Web Services Systems Manager ignores any values specified for these parameters.

" } }, - "documentation":"

The parameters for an AUTOMATION task type.

" + "documentation":"

The parameters for an AUTOMATION task type.

" }, "MaintenanceWindowCutoff":{ "type":"integer", @@ -10214,7 +10214,7 @@ }, "StatusDetails":{ "shape":"MaintenanceWindowExecutionStatusDetails", - "documentation":"

The details explaining the Status. Only available for certain status values.

" + "documentation":"

The details explaining the status. Not available for all status values.

" }, "StartTime":{ "shape":"DateTime", @@ -10283,7 +10283,7 @@ }, "StatusDetails":{ "shape":"MaintenanceWindowExecutionStatusDetails", - "documentation":"

The details explaining the status of the task execution. Only available for certain status values.

" + "documentation":"

The details explaining the status of the task execution. Not available for all status values.

" }, "StartTime":{ "shape":"DateTime", @@ -10295,7 +10295,7 @@ }, "TaskArn":{ "shape":"MaintenanceWindowTaskArn", - "documentation":"

The ARN of the task that ran.

" + "documentation":"

The Amazon Resource Name (ARN) of the task that ran.

" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -10331,7 +10331,7 @@ }, "ExecutionId":{ "shape":"MaintenanceWindowExecutionTaskExecutionId", - "documentation":"

The ID of the action performed in the service that actually handled the task invocation. If the task type is RUN_COMMAND, this value is the command ID.

" + "documentation":"

The ID of the action performed in the service that actually handled the task invocation. If the task type is RUN_COMMAND, this value is the command ID.

" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -10347,7 +10347,7 @@ }, "StatusDetails":{ "shape":"MaintenanceWindowExecutionStatusDetails", - "documentation":"

The details explaining the status of the task invocation. Only available for certain Status values.

" + "documentation":"

The details explaining the status of the task invocation. Not available for all status values.

" }, "StartTime":{ "shape":"DateTime", @@ -10359,7 +10359,7 @@ }, "OwnerInformation":{ "shape":"OwnerInformation", - "documentation":"

User-provided value that was specified when the target was registered with the maintenance window. This was also included in any CloudWatch events raised during the task invocation.

" + "documentation":"

User-provided value that was specified when the target was registered with the maintenance window. This was also included in any Amazon CloudWatch Events events raised during the task invocation.

" }, "WindowTargetId":{ "shape":"MaintenanceWindowTaskTargetId", @@ -10388,7 +10388,7 @@ "documentation":"

The filter values.

" } }, - "documentation":"

Filter used in the request. Supported filter keys are Name and Enabled.

" + "documentation":"

Filter used in the request. Supported filter keys depend on the API operation that includes the filter. API operations that use MaintenanceWindowFilter include the following:

" }, "MaintenanceWindowFilterKey":{ "type":"string", @@ -10441,7 +10441,7 @@ }, "Cutoff":{ "shape":"MaintenanceWindowCutoff", - "documentation":"

The number of hours before the end of the maintenance window that Systems Manager stops scheduling new tasks for execution.

" + "documentation":"

The number of hours before the end of the maintenance window that Amazon Web Services Systems Manager stops scheduling new tasks for execution.

" }, "Schedule":{ "shape":"MaintenanceWindowSchedule", @@ -10453,7 +10453,7 @@ }, "ScheduleOffset":{ "shape":"MaintenanceWindowOffset", - "documentation":"

The number of days to wait to run a maintenance window after the scheduled CRON expression date and time.

", + "documentation":"

The number of days to wait to run a maintenance window after the scheduled cron expression date and time.

", "box":true }, "EndDate":{ @@ -10503,14 +10503,14 @@ }, "Qualifier":{ "shape":"MaintenanceWindowLambdaQualifier", - "documentation":"

(Optional) Specify a Lambda function version or alias name. If you specify a function version, the action uses the qualified function ARN to invoke a specific Lambda function. If you specify an alias name, the action uses the alias ARN to invoke the Lambda function version to which the alias points.

" + "documentation":"

(Optional) Specify an Lambda function version or alias name. If you specify a function version, the operation uses the qualified function Amazon Resource Name (ARN) to invoke a specific Lambda function. If you specify an alias name, the operation uses the alias ARN to invoke the Lambda function version to which the alias points.

" }, "Payload":{ "shape":"MaintenanceWindowLambdaPayload", "documentation":"

JSON to provide to your Lambda function as input.

" } }, - "documentation":"

The parameters for a LAMBDA task type.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For Lambda tasks, Systems Manager ignores any values specified for TaskParameters and LoggingInfo.

" + "documentation":"

The parameters for a LAMBDA task type.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For Lambda tasks, Systems Manager ignores any values specified for TaskParameters and LoggingInfo.

" }, "MaintenanceWindowLambdaPayload":{ "type":"blob", @@ -10563,7 +10563,7 @@ }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The SSM document version to use in the request. You can specify $DEFAULT, $LATEST, or a specific version number. If you run commands by using the AWS CLI, then you must escape the first two options by using a backslash. If you specify a version number, then you don't need to use the backslash. For example:

--document-version \"\\$DEFAULT\"

--document-version \"\\$LATEST\"

--document-version \"3\"

" + "documentation":"

The Amazon Web Services Systems Manager document (SSM document) version to use in the request. You can specify $DEFAULT, $LATEST, or a specific version number. If you run commands by using the Amazon Web Services CLI, then you must escape the first two options by using a backslash. If you specify a version number, then you don't need to use the backslash. For example:

--document-version \"\\$DEFAULT\"

--document-version \"\\$LATEST\"

--document-version \"3\"

" }, "NotificationConfig":{ "shape":"NotificationConfig", @@ -10571,7 +10571,7 @@ }, "OutputS3BucketName":{ "shape":"S3BucketName", - "documentation":"

The name of the S3 bucket.

" + "documentation":"

The name of the Amazon Simple Storage Service (Amazon S3) bucket.

" }, "OutputS3KeyPrefix":{ "shape":"S3KeyPrefix", @@ -10579,19 +10579,19 @@ }, "Parameters":{ "shape":"Parameters", - "documentation":"

The parameters for the RUN_COMMAND task execution.

" + "documentation":"

The parameters for the RUN_COMMAND task execution.

" }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" }, "TimeoutSeconds":{ "shape":"TimeoutSeconds", - "documentation":"

If this time is reached and the command has not already started running, it doesn't run.

", + "documentation":"

If this time is reached and the command hasn't already started running, it doesn't run.

", "box":true } }, - "documentation":"

The parameters for a RUN_COMMAND task type.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For Run Command tasks, Systems Manager uses specified values for TaskParameters and LoggingInfo only if no values are specified for TaskInvocationParameters.

" + "documentation":"

The parameters for a RUN_COMMAND task type.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For RUN_COMMAND tasks, Systems Manager uses specified values for TaskParameters and LoggingInfo only if no values are specified for TaskInvocationParameters.

" }, "MaintenanceWindowSchedule":{ "type":"string", @@ -10617,14 +10617,14 @@ "members":{ "Input":{ "shape":"MaintenanceWindowStepFunctionsInput", - "documentation":"

The inputs for the STEP_FUNCTIONS task.

" + "documentation":"

The inputs for the STEP_FUNCTIONS task.

" }, "Name":{ "shape":"MaintenanceWindowStepFunctionsName", - "documentation":"

The name of the STEP_FUNCTIONS task.

" + "documentation":"

The name of the STEP_FUNCTIONS task.

" } }, - "documentation":"

The parameters for a STEP_FUNCTIONS task.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For Step Functions tasks, Systems Manager ignores any values specified for TaskParameters and LoggingInfo.

" + "documentation":"

The parameters for a STEP_FUNCTIONS task.

For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

TaskParameters has been deprecated. To specify parameters to pass to a task when it runs, instead use the Parameters option in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

For Step Functions tasks, Systems Manager ignores any values specified for TaskParameters and LoggingInfo.

" }, "MaintenanceWindowStringDateTime":{"type":"string"}, "MaintenanceWindowTarget":{ @@ -10648,7 +10648,7 @@ }, "OwnerInformation":{ "shape":"OwnerInformation", - "documentation":"

A user-provided value that will be included in any CloudWatch events that are raised while running tasks for these targets in this maintenance window.

" + "documentation":"

A user-provided value that will be included in any Amazon CloudWatch Events events that are raised while running tasks for these targets in this maintenance window.

" }, "Name":{ "shape":"MaintenanceWindowName", @@ -10684,15 +10684,15 @@ }, "TaskArn":{ "shape":"MaintenanceWindowTaskArn", - "documentation":"

The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION task types, TaskArn is the Systems Manager document name or ARN. For LAMBDA tasks, it's the function name or ARN. For STEP_FUNCTIONS tasks, it's the state machine ARN.

" + "documentation":"

The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION task types, TaskArn is the Amazon Web Services Systems Manager (SSM document) name or ARN. For LAMBDA tasks, it's the function name or ARN. For STEP_FUNCTIONS tasks, it's the state machine ARN.

" }, "Type":{ "shape":"MaintenanceWindowTaskType", - "documentation":"

The type of task. The type can be one of the following: RUN_COMMAND, AUTOMATION, LAMBDA, or STEP_FUNCTIONS.

" + "documentation":"

The type of task.

" }, "Targets":{ "shape":"Targets", - "documentation":"

The targets (either instances or tags). Instances are specified using Key=instanceids,Values=<instanceid1>,<instanceid2>. Tags are specified using Key=<tag name>,Values=<tag value>.

" + "documentation":"

The targets (either instances or tags). Instances are specified using Key=instanceids,Values=<instanceid1>,<instanceid2>. Tags are specified using Key=<tag name>,Values=<tag value>.

" }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParameters", @@ -10704,11 +10704,11 @@ }, "LoggingInfo":{ "shape":"LoggingInfo", - "documentation":"

Information about an S3 bucket to write task-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

Information about an S3 bucket to write task-level logs to.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", @@ -10745,19 +10745,19 @@ "members":{ "RunCommand":{ "shape":"MaintenanceWindowRunCommandParameters", - "documentation":"

The parameters for a RUN_COMMAND task type.

" + "documentation":"

The parameters for a RUN_COMMAND task type.

" }, "Automation":{ "shape":"MaintenanceWindowAutomationParameters", - "documentation":"

The parameters for an AUTOMATION task type.

" + "documentation":"

The parameters for an AUTOMATION task type.

" }, "StepFunctions":{ "shape":"MaintenanceWindowStepFunctionsParameters", - "documentation":"

The parameters for a STEP_FUNCTIONS task type.

" + "documentation":"

The parameters for a STEP_FUNCTIONS task type.

" }, "Lambda":{ "shape":"MaintenanceWindowLambdaParameters", - "documentation":"

The parameters for a LAMBDA task type.

" + "documentation":"

The parameters for a LAMBDA task type.

" } }, "documentation":"

The parameters for task execution.

" @@ -10911,15 +10911,15 @@ }, "AccountIdsToAdd":{ "shape":"AccountIdList", - "documentation":"

The AWS user accounts that should have access to the document. The account IDs can either be a group of account IDs or All.

" + "documentation":"

The Amazon Web Services user accounts that should have access to the document. The account IDs can either be a group of account IDs or All.

" }, "AccountIdsToRemove":{ "shape":"AccountIdList", - "documentation":"

The AWS user accounts that should no longer have access to the document. The AWS user account can either be a group of account IDs or All. This action has a higher priority than AccountIdsToAdd. If you specify an account ID to add and the same ID to remove, the system removes access to the document.

" + "documentation":"

The Amazon Web Services user accounts that should no longer have access to the document. The Amazon Web Services user account can either be a group of account IDs or All. This action has a higher priority than AccountIdsToAdd. If you specify an account ID to add and the same ID to remove, the system removes access to the document.

" }, "SharedDocumentVersion":{ "shape":"SharedDocumentVersion", - "documentation":"

(Optional) The version of the document to share. If it's not specified, the system choose the Default version to share.

" + "documentation":"

(Optional) The version of the document to share. If it isn't specified, the system chooses the Default version to share.

" } } }, @@ -10934,14 +10934,14 @@ "members":{ "NonCompliantCount":{ "shape":"ComplianceSummaryCount", - "documentation":"

The total number of compliance items that are not compliant.

" + "documentation":"

The total number of compliance items that aren't compliant.

" }, "SeveritySummary":{ "shape":"SeveritySummary", "documentation":"

A summary of the non-compliance severity by compliance type.

" } }, - "documentation":"

A summary of resources that are not compliant. The summary is organized according to resource type.

" + "documentation":"

A summary of resources that aren't compliant. The summary is organized according to resource type.

" }, "NormalStringMap":{ "type":"map", @@ -10958,11 +10958,11 @@ }, "NotificationEvents":{ "shape":"NotificationEventList", - "documentation":"

The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Monitoring Systems Manager status changes using Amazon SNS notifications in the AWS Systems Manager User Guide.

" + "documentation":"

The different events for which you can receive notifications. To learn more about these events, see Monitoring Systems Manager status changes using Amazon SNS notifications in the Amazon Web Services Systems Manager User Guide.

" }, "NotificationType":{ "shape":"NotificationType", - "documentation":"

Command: Receive notification when the status of a command changes. Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes.

" + "documentation":"

The type of notification.

  • Command: Receive notification when the status of a command changes.

  • Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes.

" } }, "documentation":"

Configurations for sending notifications.

" @@ -11009,15 +11009,15 @@ "members":{ "AggregatorType":{ "shape":"OpsAggregatorType", - "documentation":"

Either a Range or Count aggregator for limiting an OpsItem summary.

" + "documentation":"

Either a Range or Count aggregator for limiting an OpsData summary.

" }, "TypeName":{ "shape":"OpsDataTypeName", - "documentation":"

The data type name to use for viewing counts of OpsItems.

" + "documentation":"

The data type name to use for viewing counts of OpsData.

" }, "AttributeName":{ "shape":"OpsDataAttributeName", - "documentation":"

The name of an OpsItem attribute on which to limit the count of OpsItems.

" + "documentation":"

The name of an OpsData attribute on which to limit the count of OpsData.

" }, "Values":{ "shape":"OpsAggregatorValueMap", @@ -11029,10 +11029,10 @@ }, "Aggregators":{ "shape":"OpsAggregatorList", - "documentation":"

A nested aggregator for viewing counts of OpsItems.

" + "documentation":"

A nested aggregator for viewing counts of OpsData.

" } }, - "documentation":"

One or more aggregators for viewing counts of OpsItems using different dimensions such as Source, CreatedTime, or Source and CreatedTime, to name a few.

" + "documentation":"

One or more aggregators for viewing counts of OpsData using different dimensions such as Source, CreatedTime, or Source and CreatedTime, to name a few.

" }, "OpsAggregatorList":{ "type":"list", @@ -11094,14 +11094,14 @@ "members":{ "CaptureTime":{ "shape":"OpsEntityItemCaptureTime", - "documentation":"

The time OpsItem data was captured.

" + "documentation":"

The time the OpsData was captured.

" }, "Content":{ "shape":"OpsEntityItemEntryList", - "documentation":"

The detailed data content for an OpsItem summaries result item.

" + "documentation":"

The details of an OpsData summary.

" } }, - "documentation":"

The OpsItem summaries result item.

" + "documentation":"

The OpsData summary.

" }, "OpsEntityItemCaptureTime":{ "type":"string", @@ -11150,7 +11150,7 @@ "documentation":"

The type of filter.

" } }, - "documentation":"

A filter for viewing OpsItem summaries.

" + "documentation":"

A filter for viewing OpsData summaries.

" }, "OpsFilterKey":{ "type":"string", @@ -11190,7 +11190,7 @@ "members":{ "CreatedBy":{ "shape":"String", - "documentation":"

The ARN of the AWS account that created the OpsItem.

" + "documentation":"

The ARN of the Amazon Web Services account that created the OpsItem.

" }, "OpsItemType":{ "shape":"OpsItemType", @@ -11206,7 +11206,7 @@ }, "LastModifiedBy":{ "shape":"String", - "documentation":"

The ARN of the AWS account that last updated the OpsItem.

" + "documentation":"

The ARN of the Amazon Web Services account that last updated the OpsItem.

" }, "LastModifiedTime":{ "shape":"DateTime", @@ -11214,7 +11214,7 @@ }, "Notifications":{ "shape":"OpsItemNotifications", - "documentation":"

The Amazon Resource Name (ARN) of an SNS topic where notifications are sent when this OpsItem is edited or changed.

" + "documentation":"

The Amazon Resource Name (ARN) of an Amazon Simple Notification Service (Amazon SNS) topic where notifications are sent when this OpsItem is edited or changed.

" }, "Priority":{ "shape":"OpsItemPriority", @@ -11226,7 +11226,7 @@ }, "Status":{ "shape":"OpsItemStatus", - "documentation":"

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the AWS Systems Manager User Guide.

" + "documentation":"

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.

" }, "OpsItemId":{ "shape":"OpsItemId", @@ -11246,7 +11246,7 @@ }, "OperationalData":{ "shape":"OpsItemOperationalData", - "documentation":"

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems manually in the AWS Systems Manager User Guide.

" + "documentation":"

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API operation) can view and search on the specified data. Operational data that isn't searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API operation).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view Amazon Web Services CLI example commands that use these keys, see Creating OpsItems manually in the Amazon Web Services Systems Manager User Guide.

" }, "Category":{ "shape":"OpsItemCategory", @@ -11273,7 +11273,7 @@ "documentation":"

The time specified in a change request for a runbook workflow to end. Currently supported only for the OpsItem type /aws/changerequest.

" } }, - "documentation":"

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Operations engineers and IT professionals use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate operational work items (OpsItems) impacting the performance and health of their Amazon Web Services resources. OpsCenter is integrated with Amazon EventBridge and Amazon CloudWatch. This means you can configure these services to automatically create an OpsItem in OpsCenter when a CloudWatch alarm enters the ALARM state or when EventBridge processes an event from any Amazon Web Services service that publishes events. Configuring Amazon CloudWatch alarms and EventBridge events to automatically create OpsItems allows you to quickly diagnose and remediate issues with Amazon Web Services resources from a single console.

To help you diagnose issues, each OpsItem includes contextually relevant information such as the name and ID of the Amazon Web Services resource that generated the OpsItem, alarm or event details, alarm history, and an alarm timeline graph. For the Amazon Web Services resource, OpsCenter aggregates information from Config, CloudTrail logs, and EventBridge, so you don't have to navigate across multiple console pages during your investigation. For more information, see OpsCenter in the Amazon Web Services Systems Manager User Guide.

" }, "OpsItemAlreadyExistsException":{ "type":"structure", @@ -11466,7 +11466,8 @@ "ChangeRequestByApproverArn", "ChangeRequestByApproverName", "ChangeRequestByTemplate", - "ChangeRequestByTargetsResourceGroup" + "ChangeRequestByTargetsResourceGroup", + "InsightByType" ] }, "OpsItemFilterOperator":{ @@ -11539,7 +11540,7 @@ "members":{ "Arn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of an SNS topic where notifications are sent when this OpsItem is edited or changed.

" + "documentation":"

The Amazon Resource Name (ARN) of an Amazon Simple Notification Service (Amazon SNS) topic where notifications are sent when this OpsItem is edited or changed.

" } }, "documentation":"

A notification about the OpsItem.

" @@ -11582,7 +11583,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The association was not found using the parameters you specified in the call. Verify the information and try again.

", + "documentation":"

The association wasn't found using the parameters you specified in the call. Verify the information and try again.

", "exception":true }, "OpsItemRelatedItemAssociationResourceType":{"type":"string"}, @@ -11709,7 +11710,8 @@ "ChangeCalendarOverrideRejected", "PendingApproval", "Approved", - "Rejected" + "Rejected", + "Closed" ] }, "OpsItemSummaries":{ @@ -11741,7 +11743,7 @@ }, "Source":{ "shape":"OpsItemSource", - "documentation":"

The impacted AWS resource.

" + "documentation":"

The impacted Amazon Web Services resource.

" }, "Status":{ "shape":"OpsItemStatus", @@ -11913,7 +11915,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The OpsMetadata object does not exist.

", + "documentation":"

The OpsMetadata object doesn't exist.

", "exception":true }, "OpsMetadataResourceId":{ @@ -11936,7 +11938,7 @@ "members":{ "TypeName":{ "shape":"OpsDataTypeName", - "documentation":"

Name of the data type. Valid value: AWS:OpsItem, AWS:EC2InstanceInformation, AWS:OpsItemTrendline, or AWS:ComplianceSummary.

" + "documentation":"

Name of the data type. Valid value: AWS:OpsItem, AWS:EC2InstanceInformation, AWS:OpsItemTrendline, or AWS:ComplianceSummary.

" } }, "documentation":"

The OpsItem data type to return.

" @@ -12009,7 +12011,7 @@ }, "SourceResult":{ "shape":"String", - "documentation":"

Applies to parameters that reference information in other AWS services. SourceResult is the raw result or response from the source.

" + "documentation":"

Applies to parameters that reference information in other Amazon Web Services services. SourceResult is the raw result or response from the source.

" }, "LastModifiedDate":{ "shape":"DateTime", @@ -12024,7 +12026,7 @@ "documentation":"

The data type of the parameter, such as text or aws:ec2:image. The default is text.

" } }, - "documentation":"

An Systems Manager parameter in Parameter Store.

" + "documentation":"

An Amazon Web Services Systems Manager parameter in Parameter Store.

" }, "ParameterAlreadyExists":{ "type":"structure", @@ -12065,7 +12067,7 @@ }, "LastModifiedUser":{ "shape":"String", - "documentation":"

Amazon Resource Name (ARN) of the AWS user who last changed the parameter.

" + "documentation":"

Amazon Resource Name (ARN) of the Amazon Web Services user who last changed the parameter.

" }, "Description":{ "shape":"ParameterDescription", @@ -12093,7 +12095,7 @@ }, "Policies":{ "shape":"ParameterPolicyList", - "documentation":"

Information about the policies assigned to a parameter.

Assigning parameter policies in the AWS Systems Manager User Guide.

" + "documentation":"

Information about the policies assigned to a parameter.

Assigning parameter policies in the Amazon Web Services Systems Manager User Guide.

" }, "DataType":{ "shape":"ParameterDataType", @@ -12115,11 +12117,11 @@ }, "PolicyType":{ "shape":"String", - "documentation":"

The type of policy. Parameter Store supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification.

" + "documentation":"

The type of policy. Parameter Store, a capability of Amazon Web Services Systems Manager, supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification.

" }, "PolicyStatus":{ "shape":"String", - "documentation":"

The status of the policy. Policies report the following statuses: Pending (the policy has not been enforced or applied yet), Finished (the policy was applied), Failed (the policy was not applied), or InProgress (the policy is being applied now).

" + "documentation":"

The status of the policy. Policies report the following statuses: Pending (the policy hasn't been enforced or applied yet), Finished (the policy was applied), Failed (the policy wasn't applied), or InProgress (the policy is being applied now).

" } }, "documentation":"

One or more policies assigned to a parameter.

" @@ -12146,7 +12148,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

You have exceeded the number of parameters for this AWS account. Delete one or more parameters and try again.

", + "documentation":"

You have exceeded the number of parameters for this Amazon Web Services account. Delete one or more parameters and try again.

", "exception":true }, "ParameterList":{ @@ -12158,7 +12160,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

Parameter Store retains the 100 most recently created versions of a parameter. After this number of versions has been created, Parameter Store deletes the oldest version when a new one is created. However, if the oldest version has a label attached to it, Parameter Store will not delete the version and instead presents this error message:

An error occurred (ParameterMaxVersionLimitExceeded) when calling the PutParameter operation: You attempted to create a new version of parameter-name by calling the PutParameter API with the overwrite flag. Version version-number, the oldest version, can't be deleted because it has a label associated with it. Move the label to another version of the parameter, and try again.

This safeguard is to prevent parameter versions with mission critical labels assigned to them from being deleted. To continue creating new parameters, first move the label from the oldest version of the parameter to a newer one for use in your operations. For information about moving parameter labels, see Move a parameter label (console) or Move a parameter label (CLI) in the AWS Systems Manager User Guide.

", + "documentation":"

Parameter Store retains the 100 most recently created versions of a parameter. After this number of versions has been created, Parameter Store deletes the oldest version when a new one is created. However, if the oldest version has a label attached to it, Parameter Store won't delete the version and instead presents this error message:

An error occurred (ParameterMaxVersionLimitExceeded) when calling the PutParameter operation: You attempted to create a new version of parameter-name by calling the PutParameter API with the overwrite flag. Version version-number, the oldest version, can't be deleted because it has a label associated with it. Move the label to another version of the parameter, and try again.

This safeguard is to prevent parameter versions with mission critical labels assigned to them from being deleted. To continue creating new parameters, first move the label from the oldest version of the parameter to a newer one for use in your operations. For information about moving parameter labels, see Move a parameter label (console) or Move a parameter label (CLI) in the Amazon Web Services Systems Manager User Guide.

", "exception":true }, "ParameterMetadata":{ @@ -12182,7 +12184,7 @@ }, "LastModifiedUser":{ "shape":"String", - "documentation":"

Amazon Resource Name (ARN) of the AWS user who last changed the parameter.

" + "documentation":"

Amazon Resource Name (ARN) of the Amazon Web Services user who last changed the parameter.

" }, "Description":{ "shape":"ParameterDescription", @@ -12227,7 +12229,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The parameter could not be found. Verify the name and try again.

", + "documentation":"

The parameter couldn't be found. Verify the name and try again.

", "exception":true }, "ParameterPatternMismatchException":{ @@ -12235,10 +12237,10 @@ "members":{ "message":{ "shape":"String", - "documentation":"

The parameter name is not valid.

" + "documentation":"

The parameter name isn't valid.

" } }, - "documentation":"

The parameter name is not valid.

", + "documentation":"

The parameter name isn't valid.

", "exception":true }, "ParameterPolicies":{ @@ -12256,7 +12258,7 @@ "members":{ "Key":{ "shape":"ParameterStringFilterKey", - "documentation":"

The name of the filter.

The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath API actions. However, not all of the pattern values listed for Key can be used with both actions.

For DescribeActions, all of the listed patterns are valid, with the exception of Label.

For GetParametersByPath, the following patterns listed for Key are not valid: tag, Name, Path, and Tier.

For examples of CLI commands demonstrating valid parameter filter constructions, see Searching for Systems Manager parameters in the AWS Systems Manager User Guide.

" + "documentation":"

The name of the filter.

The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath API operations. However, not all of the pattern values listed for Key can be used with both operations.

For DescribeActions, all of the listed patterns are valid, with the exception of Label.

For GetParametersByPath, the following patterns listed for Key aren't valid: tag, Name, Path, and Tier.

For examples of Amazon Web Services CLI commands demonstrating valid parameter filter constructions, see Searching for Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

" }, "Option":{ "shape":"ParameterStringQueryOption", @@ -12329,7 +12331,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The specified parameter version was not found. Verify the parameter name and version, and try again.

", + "documentation":"

The specified parameter version wasn't found. Verify the parameter name and version, and try again.

", "exception":true }, "Parameters":{ @@ -12383,7 +12385,7 @@ "members":{ "Id":{ "shape":"PatchId", - "documentation":"

The ID of the patch. Applies to Windows patches only.

This ID is not the same as the Microsoft Knowledge Base ID.

" + "documentation":"

The ID of the patch. Applies to Windows patches only.

This ID isn't the same as the Microsoft Knowledge Base ID.

" }, "ReleaseDate":{ "shape":"DateTime", @@ -12502,7 +12504,7 @@ }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

Defines the operating system the patch baseline applies to. The Default value is WINDOWS.

" + "documentation":"

Defines the operating system the patch baseline applies to. The default value is WINDOWS.

" }, "BaselineDescription":{ "shape":"BaselineDescription", @@ -12510,7 +12512,7 @@ }, "DefaultBaseline":{ "shape":"DefaultBaseline", - "documentation":"

Whether this is the default baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

" + "documentation":"

Whether this is the default baseline. Amazon Web Services Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

" } }, "documentation":"

Defines the basic information about a patch baseline.

" @@ -12557,19 +12559,19 @@ }, "Classification":{ "shape":"PatchClassification", - "documentation":"

The classification of the patch (for example, SecurityUpdates, Updates, CriticalUpdates).

" + "documentation":"

The classification of the patch, such as SecurityUpdates, Updates, and CriticalUpdates.

" }, "Severity":{ "shape":"PatchSeverity", - "documentation":"

The severity of the patch (for example, Critical, Important, Moderate).

" + "documentation":"

The severity of the patch, such as Critical, Important, and Moderate.

" }, "State":{ "shape":"PatchComplianceDataState", - "documentation":"

The state of the patch on the instance, such as INSTALLED or FAILED.

For descriptions of each patch state, see About patch compliance in the AWS Systems Manager User Guide.

" + "documentation":"

The state of the patch on the instance, such as INSTALLED or FAILED.

For descriptions of each patch state, see About patch compliance in the Amazon Web Services Systems Manager User Guide.

" }, "InstalledTime":{ "shape":"DateTime", - "documentation":"

The date/time the patch was installed on the instance. Note that not all operating systems provide this level of information.

" + "documentation":"

The date/time the patch was installed on the instance. Not all operating systems provide this level of information.

" }, "CVEIds":{ "shape":"PatchCVEIds", @@ -12640,7 +12642,7 @@ "documentation":"

The value for the filter key.

Run the DescribePatchProperties command to view lists of valid values for each key based on operating system type.

" } }, - "documentation":"

Defines which patches should be included in a patch baseline.

A patch filter consists of a key and a set of values. The filter key is a patch property. For example, the available filter keys for WINDOWS are PATCH_SET, PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, and MSRC_SEVERITY. The filter values define a matching criterion for the patch property indicated by the key. For example, if the filter key is PRODUCT and the filter values are [\"Office 2013\", \"Office 2016\"], then the filter accepts all patches where product name is either \"Office 2013\" or \"Office 2016\". The filter values can be exact values for the patch property given as a key, or a wildcard (*), which matches all values.

You can view lists of valid values for the patch properties by running the DescribePatchProperties command. For information about which patch properties can be used with each major operating system, see DescribePatchProperties.

" + "documentation":"

Defines which patches should be included in a patch baseline.

A patch filter consists of a key and a set of values. The filter key is a patch property. For example, the available filter keys for WINDOWS are PATCH_SET, PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, and MSRC_SEVERITY.

The filter values define a matching criterion for the patch property indicated by the key. For example, if the filter key is PRODUCT and the filter values are [\"Office 2013\", \"Office 2016\"], then the filter accepts all patches where product name is either \"Office 2013\" or \"Office 2016\". The filter values can be exact values for the patch property given as a key, or a wildcard (*), which matches all values.

You can view lists of valid values for the patch properties by running the DescribePatchProperties command. For information about which patch properties can be used with each major operating system, see DescribePatchProperties.

" }, "PatchFilterGroup":{ "type":"structure", @@ -12767,7 +12769,7 @@ "documentation":"

The value for the filter.

" } }, - "documentation":"

Defines a filter used in Patch Manager APIs.

" + "documentation":"

Defines a filter used in Patch Manager APIs. Supported filter keys depend on the API operation that includes the filter. Patch Manager API operations that use PatchOrchestratorFilter include the following:

" }, "PatchOrchestratorFilterKey":{ "type":"string", @@ -12833,12 +12835,12 @@ }, "ApproveUntilDate":{ "shape":"PatchStringDateTime", - "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server.

Enter dates in the format YYYY-MM-DD. For example, 2020-12-31.

", + "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server.

Enter dates in the format YYYY-MM-DD. For example, 2021-12-31.

", "box":true }, "EnableNonSecurity":{ "shape":"Boolean", - "documentation":"

For instances identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is 'false'. Applies to Linux instances only.

", + "documentation":"

For instances identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is false. Applies to Linux instances only.

", "box":true } }, @@ -12925,7 +12927,7 @@ "members":{ "DeploymentStatus":{ "shape":"PatchDeploymentStatus", - "documentation":"

The approval status of a patch (APPROVED, PENDING_APPROVAL, EXPLICIT_APPROVED, EXPLICIT_REJECTED).

" + "documentation":"

The approval status of a patch.

" }, "ComplianceLevel":{ "shape":"PatchComplianceLevel", @@ -12933,7 +12935,7 @@ }, "ApprovalDate":{ "shape":"DateTime", - "documentation":"

The date the patch was approved (or will be approved if the status is PENDING_APPROVAL).

" + "documentation":"

The date the patch was approved (or will be approved if the status is PENDING_APPROVAL).

" } }, "documentation":"

Information about the approval status of a patch.

" @@ -12980,26 +12982,26 @@ "members":{ "TotalSteps":{ "shape":"Integer", - "documentation":"

The total number of steps run in all specified AWS Regions and accounts for the current Automation execution.

" + "documentation":"

The total number of steps run in all specified Amazon Web Services Regions and Amazon Web Services accounts for the current Automation execution.

" }, "SuccessSteps":{ "shape":"Integer", - "documentation":"

The total number of steps that successfully completed in all specified AWS Regions and accounts for the current Automation execution.

" + "documentation":"

The total number of steps that successfully completed in all specified Amazon Web Services Regions and Amazon Web Services accounts for the current Automation execution.

" }, "FailedSteps":{ "shape":"Integer", - "documentation":"

The total number of steps that failed to run in all specified AWS Regions and accounts for the current Automation execution.

" + "documentation":"

The total number of steps that failed to run in all specified Amazon Web Services Regions and Amazon Web Services accounts for the current Automation execution.

" }, "CancelledSteps":{ "shape":"Integer", - "documentation":"

The total number of steps that the system cancelled in all specified AWS Regions and accounts for the current Automation execution.

" + "documentation":"

The total number of steps that the system cancelled in all specified Amazon Web Services Regions and Amazon Web Services accounts for the current Automation execution.

" }, "TimedOutSteps":{ "shape":"Integer", - "documentation":"

The total number of steps that timed out in all specified AWS Regions and accounts for the current Automation execution.

" + "documentation":"

The total number of steps that timed out in all specified Amazon Web Services Regions and Amazon Web Services accounts for the current Automation execution.

" } }, - "documentation":"

An aggregate of step execution statuses displayed in the AWS Console for a multi-Region and multi-account Automation execution.

" + "documentation":"

An aggregate of step execution statuses displayed in the Amazon Web Services Systems Manager console for a multi-Region and multi-account Automation execution.

" }, "PutComplianceItemsRequest":{ "type":"structure", @@ -13083,11 +13085,11 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an AWS Region

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-

    In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see Creating Systems Manager parameters in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters:

arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName

" + "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an Amazon Web Services Region

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-

    In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters:

arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName

" }, "Description":{ "shape":"ParameterDescription", - "documentation":"

Information about the parameter that you want to add to the system. Optional but recommended.

Do not enter personally identifiable information in this field.

" + "documentation":"

Information about the parameter that you want to add to the system. Optional but recommended.

Don't enter personally identifiable information in this field.

" }, "Value":{ "shape":"PSParameterValue", @@ -13095,15 +13097,15 @@ }, "Type":{ "shape":"ParameterType", - "documentation":"

The type of parameter that you want to add to the system.

SecureString is not currently supported for AWS CloudFormation templates.

Items in a StringList must be separated by a comma (,). You can't use other punctuation or special character to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.

Specifying a parameter type is not required when updating a parameter. You must specify a parameter type when creating a parameter.

" + "documentation":"

The type of parameter that you want to add to the system.

SecureString isn't currently supported for CloudFormation templates.

Items in a StringList must be separated by a comma (,). You can't use other punctuation or special character to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.

Specifying a parameter type isn't required when updating a parameter. You must specify a parameter type when creating a parameter.

" }, "KeyId":{ "shape":"ParameterKeyId", - "documentation":"

The KMS Key ID that you want to use to encrypt a parameter. Either the default AWS Key Management Service (AWS KMS) key automatically assigned to your AWS account or a custom key. Required for parameters that use the SecureString data type.

If you don't specify a key ID, the system uses the default key associated with your AWS account.

  • To use your default AWS KMS key, choose the SecureString data type, and do not specify the Key ID when you create the parameter. The system automatically populates Key ID with your default KMS key.

  • To use a custom KMS key, choose the SecureString data type with the Key ID parameter.

" + "documentation":"

The Key Management Service (KMS) ID that you want to use to encrypt a parameter. Either the default KMS key automatically assigned to your Amazon Web Services account or a custom key. Required for parameters that use the SecureString data type.

If you don't specify a key ID, the system uses the default key associated with your Amazon Web Services account.

  • To use your default KMS key, choose the SecureString data type, and do not specify the Key ID when you create the parameter. The system automatically populates Key ID with your default KMS key.

  • To use a custom KMS key, choose the SecureString data type with the Key ID parameter.

" }, "Overwrite":{ "shape":"Boolean", - "documentation":"

Overwrite an existing parameter. The default value is 'false'.

", + "documentation":"

Overwrite an existing parameter. The default value is false.

", "box":true }, "AllowedPattern":{ @@ -13112,19 +13114,19 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter. In this case, you could specify the following key name/value pairs:

  • Key=Resource,Value=S3bucket

  • Key=OS,Value=Windows

  • Key=ParameterType,Value=LicenseKey

To add tags to an existing Systems Manager parameter, use the AddTagsToResource action.

" + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter. In this case, you could specify the following key-value pairs:

  • Key=Resource,Value=S3bucket

  • Key=OS,Value=Windows

  • Key=ParameterType,Value=LicenseKey

To add tags to an existing Systems Manager parameter, use the AddTagsToResource operation.

" }, "Tier":{ "shape":"ParameterTier", - "documentation":"

The parameter tier to assign to a parameter.

Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a content size limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters for each Region in an AWS account. Standard parameters are offered at no additional cost.

Advanced parameters have a content size limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters for each Region in an AWS account. Advanced parameters incur a charge. For more information, see Standard and advanced parameter tiers in the AWS Systems Manager User Guide.

You can change a standard parameter to an advanced parameter any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters.

If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter.

Using the Default Tier Configuration

In PutParameter requests, you can specify the tier to create the parameter in. Whenever you specify a tier in the request, Parameter Store creates or updates the parameter according to that request. However, if you do not specify a tier in a request, Parameter Store assigns the tier based on the current Parameter Store default tier configuration.

The default tier when you begin using Parameter Store is the standard-parameter tier. If you use the advanced-parameter tier, you can specify one of the following as the default:

  • Advanced: With this option, Parameter Store evaluates all requests as advanced parameters.

  • Intelligent-Tiering: With this option, Parameter Store evaluates each request to determine if the parameter is standard or advanced.

    If the request doesn't include any options that require an advanced parameter, the parameter is created in the standard-parameter tier. If one or more options requiring an advanced parameter are included in the request, Parameter Store create a parameter in the advanced-parameter tier.

    This approach helps control your parameter-related costs by always creating standard parameters unless an advanced parameter is necessary.

Options that require an advanced parameter include the following:

  • The content size of the parameter is more than 4 KB.

  • The parameter uses a parameter policy.

  • More than 10,000 parameters already exist in your AWS account in the current Region.

For more information about configuring the default tier option, see Specifying a default parameter tier in the AWS Systems Manager User Guide.

" + "documentation":"

The parameter tier to assign to a parameter.

Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a content size limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters for each Region in an Amazon Web Services account. Standard parameters are offered at no additional cost.

Advanced parameters have a content size limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters for each Region in an Amazon Web Services account. Advanced parameters incur a charge. For more information, see Standard and advanced parameter tiers in the Amazon Web Services Systems Manager User Guide.

You can change a standard parameter to an advanced parameter any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters.

If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter.

Using the Default Tier Configuration

In PutParameter requests, you can specify the tier to create the parameter in. Whenever you specify a tier in the request, Parameter Store creates or updates the parameter according to that request. However, if you don't specify a tier in a request, Parameter Store assigns the tier based on the current Parameter Store default tier configuration.

The default tier when you begin using Parameter Store is the standard-parameter tier. If you use the advanced-parameter tier, you can specify one of the following as the default:

  • Advanced: With this option, Parameter Store evaluates all requests as advanced parameters.

  • Intelligent-Tiering: With this option, Parameter Store evaluates each request to determine if the parameter is standard or advanced.

    If the request doesn't include any options that require an advanced parameter, the parameter is created in the standard-parameter tier. If one or more options requiring an advanced parameter are included in the request, Parameter Store creates a parameter in the advanced-parameter tier.

    This approach helps control your parameter-related costs by always creating standard parameters unless an advanced parameter is necessary.

Options that require an advanced parameter include the following:

  • The content size of the parameter is more than 4 KB.

  • The parameter uses a parameter policy.

  • More than 10,000 parameters already exist in your Amazon Web Services account in the current Amazon Web Services Region.

For more information about configuring the default tier option, see Specifying a default parameter tier in the Amazon Web Services Systems Manager User Guide.

" }, "Policies":{ "shape":"ParameterPolicies", - "documentation":"

One or more policies to apply to a parameter. This action takes a JSON array. Parameter Store supports the following policy types:

Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter does not affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter.

ExpirationNotification: This policy triggers an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours.

NoChangeNotification: This policy triggers a CloudWatch event if a parameter has not been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it has not been changed.

All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies.

" + "documentation":"

One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a capability of Amazon Web Services Systems Manager, supports the following policy types:

Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter.

ExpirationNotification: This policy triggers an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours.

NoChangeNotification: This policy triggers a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed.

All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies.

" }, "DataType":{ "shape":"ParameterDataType", - "documentation":"

The data type for a String parameter. Supported data types include plain text and Amazon Machine Image IDs.

The following data type values are supported.

  • text

  • aws:ec2:image

When you create a String parameter and specify aws:ec2:image, Systems Manager validates the parameter value is in the required format, such as ami-12345abcdeEXAMPLE, and that the specified AMI is available in your AWS account. For more information, see Native parameter support for Amazon Machine Image IDs in the AWS Systems Manager User Guide.

" + "documentation":"

The data type for a String parameter. Supported data types include plain text and Amazon Machine Image (AMI) IDs.

The following data type values are supported.

  • text

  • aws:ec2:image

When you create a String parameter and specify aws:ec2:image, Amazon Web Services Systems Manager validates the parameter value is in the required format, such as ami-12345abcdeEXAMPLE, and that the specified AMI is available in your Amazon Web Services account. For more information, see Native parameter support for Amazon Machine Image (AMI) IDs in the Amazon Web Services Systems Manager User Guide.

" } } }, @@ -13133,7 +13135,7 @@ "members":{ "Version":{ "shape":"PSParameterVersion", - "documentation":"

The new version number of a parameter. If you edit a parameter value, Parameter Store automatically creates a new version and assigns this new version a unique ID. You can reference a parameter version ID in API actions or in Systems Manager documents (SSM documents). By default, if you don't specify a specific version, the system returns the latest parameter value when a parameter is called.

" + "documentation":"

The new version number of a parameter. If you edit a parameter value, Parameter Store automatically creates a new version and assigns this new version a unique ID. You can reference a parameter version ID in API operations or in Systems Manager documents (SSM documents). By default, if you don't specify a specific version, the system returns the latest parameter value when a parameter is called.

" }, "Tier":{ "shape":"ParameterTier", @@ -13183,11 +13185,11 @@ "members":{ "BaselineId":{ "shape":"BaselineId", - "documentation":"

The ID of the patch baseline to register the patch group with.

" + "documentation":"

The ID of the patch baseline to register with the patch group.

" }, "PatchGroup":{ "shape":"PatchGroup", - "documentation":"

The name of the patch group that should be registered with the patch baseline.

" + "documentation":"

The name of the patch group to be registered with the patch baseline.

" } } }, @@ -13222,11 +13224,11 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

If a single maintenance window task is registered with multiple targets, its task invocations occur sequentially and not in parallel. If your task must run on multiple targets at the same time, register a task for each target individually and assign each task the same priority level.

You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

Example 4: Use resource group names

Key=resource-groups:Name,Values=resource-group-name

Example 5: Use filters for resource group types

Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format

Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For more information about these examples formats, including the best use case for each one, see Examples: Register targets with a maintenance window in the AWS Systems Manager User Guide.

" + "documentation":"

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

If a single maintenance window task is registered with multiple targets, its task invocations occur sequentially and not in parallel. If your task must run on multiple targets at the same time, register a task for each target individually and assign each task the same priority level.

You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

Example 4: Use resource group names

Key=resource-groups:Name,Values=resource-group-name

Example 5: Use filters for resource group types

Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format

Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For more information about these examples formats, including the best use case for each one, see Examples: Register targets with a maintenance window in the Amazon Web Services Systems Manager User Guide.

" }, "OwnerInformation":{ "shape":"OwnerInformation", - "documentation":"

User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this maintenance window.

" + "documentation":"

User-provided value that will be included in any Amazon CloudWatch Events events raised while running tasks for these targets in this maintenance window.

" }, "Name":{ "shape":"MaintenanceWindowName", @@ -13266,7 +13268,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets (either instances or maintenance window targets).

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, AWS Lambda, and AWS Step Functions). For more information about running tasks that do not specify targets, see Registering maintenance window tasks without targets in the AWS Systems Manager User Guide.

Specify instances using the following format:

Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>

Specify maintenance window targets using the following format:

Key=WindowTargetIds,Values=<window-target-id-1>,<window-target-id-2>

", + "documentation":"

The targets (either instances or maintenance window targets).

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.

Specify instances using the following format:

Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>

Specify maintenance window targets using the following format:

Key=WindowTargetIds,Values=<window-target-id-1>,<window-target-id-2>

", "box":true }, "TaskArn":{ @@ -13275,7 +13277,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role for Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

For more information, see the following topics in the in the AWS Systems Manager User Guide:

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

For more information, see the following topics in the Amazon Web Services Systems Manager User Guide:

" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -13296,17 +13298,17 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of targets this task can be run for in parallel.

For maintenance window tasks without a target specified, you cannot supply a value for this option. Instead, the system inserts a placeholder value of 1. This value does not affect the running of your task.

", + "documentation":"

The maximum number of targets this task can be run for in parallel.

For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of 1. This value doesn't affect the running of your task.

", "box":true }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The maximum number of errors allowed before this task stops being scheduled.

For maintenance window tasks without a target specified, you cannot supply a value for this option. Instead, the system inserts a placeholder value of 1. This value does not affect the running of your task.

", + "documentation":"

The maximum number of errors allowed before this task stops being scheduled.

For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of 1. This value doesn't affect the running of your task.

", "box":true }, "LoggingInfo":{ "shape":"LoggingInfo", - "documentation":"

A structure containing information about an S3 bucket to write instance-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

A structure containing information about an Amazon Simple Storage Service (Amazon S3) bucket to write instance-level logs to.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "Name":{ "shape":"MaintenanceWindowName", @@ -13368,11 +13370,11 @@ "members":{ "ResourceType":{ "shape":"ResourceTypeForTagging", - "documentation":"

The type of resource from which you want to remove a tag.

The ManagedInstance type for this API action is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" + "documentation":"

The type of resource from which you want to remove a tag.

The ManagedInstance type for this API operation is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" }, "ResourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the resource from which you want to remove tags. For example:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

OpsMetadata object: ResourceID for tagging is created from the Amazon Resource Name (ARN) for the object. Specifically, ResourceID is created from the strings that come after the word opsmetadata in the ARN. For example, an OpsMetadata object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager.

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API action is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" + "documentation":"

The ID of the resource from which you want to remove tags. For example:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

OpsMetadata object: ResourceID for tagging is created from the Amazon Resource Name (ARN) for the object. Specifically, ResourceID is created from the strings that come after the word opsmetadata in the ARN. For example, an OpsMetadata object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager.

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API operation is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" }, "TagKeys":{ "shape":"KeyList", @@ -13394,17 +13396,17 @@ "documentation":"

The Amazon Resource Name (ARN) of the service setting to reset. The setting ID can be one of the following.

  • /ssm/automation/customer-script-log-destination

  • /ssm/automation/customer-script-log-group-name

  • /ssm/documents/console/public-sharing-permission

  • /ssm/parameter-store/default-parameter-tier

  • /ssm/parameter-store/high-throughput-enabled

  • /ssm/managed-instance/activation-tier

" } }, - "documentation":"

The request body of the ResetServiceSetting API action.

" + "documentation":"

The request body of the ResetServiceSetting API operation.

" }, "ResetServiceSettingResult":{ "type":"structure", "members":{ "ServiceSetting":{ "shape":"ServiceSetting", - "documentation":"

The current, effective service setting after calling the ResetServiceSetting API action.

" + "documentation":"

The current, effective service setting after calling the ResetServiceSetting API operation.

" } }, - "documentation":"

The result body of the ResetServiceSetting API action.

" + "documentation":"

The result body of the ResetServiceSetting API operation.

" }, "ResolvedTargets":{ "type":"structure", @@ -13484,14 +13486,14 @@ "members":{ "OrganizationSourceType":{ "shape":"ResourceDataSyncOrganizationSourceType", - "documentation":"

If an AWS Organization is present, this is either OrganizationalUnits or EntireOrganization. For OrganizationalUnits, the data is aggregated from a set of organization units. For EntireOrganization, the data is aggregated from the entire AWS Organization.

" + "documentation":"

If an Amazon Web Services organization is present, this is either OrganizationalUnits or EntireOrganization. For OrganizationalUnits, the data is aggregated from a set of organization units. For EntireOrganization, the data is aggregated from the entire Amazon Web Services organization.

" }, "OrganizationalUnits":{ "shape":"ResourceDataSyncOrganizationalUnitList", - "documentation":"

The AWS Organizations organization units included in the sync.

" + "documentation":"

The Organizations organization units included in the sync.

" } }, - "documentation":"

Information about the AwsOrganizationsSource resource data sync source. A sync source of this type can synchronize data from AWS Organizations or, if an AWS Organization is not present, from multiple AWS Regions.

" + "documentation":"

Information about the AwsOrganizationsSource resource data sync source. A sync source of this type can synchronize data from Organizations or, if an Amazon Web Services organization isn't present, from multiple Amazon Web Services Regions.

" }, "ResourceDataSyncConflictException":{ "type":"structure", @@ -13518,7 +13520,7 @@ "documentation":"

The sharing data type. Only Organization is supported.

" } }, - "documentation":"

Synchronize Systems Manager Inventory data from multiple AWS accounts defined in AWS Organizations to a centralized S3 bucket. Data is synchronized to individual key prefixes in the central bucket. Each key prefix represents a different AWS account ID.

" + "documentation":"

Synchronize Amazon Web Services Systems Manager Inventory data from multiple Amazon Web Services accounts defined in Organizations to a centralized Amazon S3 bucket. Data is synchronized to individual key prefixes in the central bucket. Each key prefix represents a different Amazon Web Services account ID.

" }, "ResourceDataSyncDestinationDataSharingType":{ "type":"string", @@ -13540,11 +13542,11 @@ "members":{ "SyncName":{ "shape":"ResourceDataSyncName", - "documentation":"

The name of the Resource Data Sync.

" + "documentation":"

The name of the resource data sync.

" }, "SyncType":{ "shape":"ResourceDataSyncType", - "documentation":"

The type of resource data sync. If SyncType is SyncToDestination, then the resource data sync synchronizes data to an S3 bucket. If the SyncType is SyncFromSource then the resource data sync synchronizes data from AWS Organizations or from multiple AWS Regions.

" + "documentation":"

The type of resource data sync. If SyncType is SyncToDestination, then the resource data sync synchronizes data to an S3 bucket. If the SyncType is SyncFromSource then the resource data sync synchronizes data from Organizations or from multiple Amazon Web Services Regions.

" }, "SyncSource":{ "shape":"ResourceDataSyncSourceWithState", @@ -13579,7 +13581,7 @@ "documentation":"

The status message details reported by the last sync.

" } }, - "documentation":"

Information about a Resource Data Sync configuration, including its current status and last successful sync.

" + "documentation":"

Information about a resource data sync configuration, including its current status and last successful sync.

" }, "ResourceDataSyncItemList":{ "type":"list", @@ -13598,7 +13600,7 @@ "SyncType":{"shape":"ResourceDataSyncType"}, "Message":{"shape":"String"} }, - "documentation":"

The specified sync name was not found.

", + "documentation":"

The specified sync name wasn't found.

", "exception":true }, "ResourceDataSyncOrganizationSourceType":{ @@ -13611,10 +13613,10 @@ "members":{ "OrganizationalUnitId":{ "shape":"ResourceDataSyncOrganizationalUnitId", - "documentation":"

The AWS Organization unit ID data source for the sync.

" + "documentation":"

The Organizations unit ID data source for the sync.

" } }, - "documentation":"

The AWS Organizations organizational unit data source for the sync.

" + "documentation":"

The Organizations organizational unit data source for the sync.

" }, "ResourceDataSyncOrganizationalUnitId":{ "type":"string", @@ -13655,7 +13657,7 @@ }, "Region":{ "shape":"ResourceDataSyncS3Region", - "documentation":"

The AWS Region with the S3 bucket targeted by the Resource Data Sync.

" + "documentation":"

The Amazon Web Services Region with the S3 bucket targeted by the resource data sync.

" }, "AWSKMSKeyARN":{ "shape":"ResourceDataSyncAWSKMSKeyARN", @@ -13666,7 +13668,7 @@ "documentation":"

Enables destination data sharing. By default, this field is null.

" } }, - "documentation":"

Information about the target S3 bucket for the Resource Data Sync.

" + "documentation":"

Information about the target S3 bucket for the resource data sync.

" }, "ResourceDataSyncS3Format":{ "type":"string", @@ -13691,23 +13693,23 @@ "members":{ "SourceType":{ "shape":"ResourceDataSyncSourceType", - "documentation":"

The type of data source for the resource data sync. SourceType is either AwsOrganizations (if an organization is present in AWS Organizations) or SingleAccountMultiRegions.

" + "documentation":"

The type of data source for the resource data sync. SourceType is either AwsOrganizations (if an organization is present in Organizations) or SingleAccountMultiRegions.

" }, "AwsOrganizationsSource":{ "shape":"ResourceDataSyncAwsOrganizationsSource", - "documentation":"

Information about the AwsOrganizationsSource resource data sync source. A sync source of this type can synchronize data from AWS Organizations.

" + "documentation":"

Information about the AwsOrganizationsSource resource data sync source. A sync source of this type can synchronize data from Organizations.

" }, "SourceRegions":{ "shape":"ResourceDataSyncSourceRegionList", - "documentation":"

The SyncSource AWS Regions included in the resource data sync.

" + "documentation":"

The SyncSource Amazon Web Services Regions included in the resource data sync.

" }, "IncludeFutureRegions":{ "shape":"ResourceDataSyncIncludeFutureRegions", - "documentation":"

Whether to automatically synchronize and aggregate data from new AWS Regions when those Regions come online.

" + "documentation":"

Whether to automatically synchronize and aggregate data from new Amazon Web Services Regions when those Regions come online.

" }, "EnableAllOpsDataSources":{ "shape":"ResourceDataSyncEnableAllOpsDataSources", - "documentation":"

When you create a resource data sync, if you choose one of the AWS Organizations options, then Systems Manager automatically enables all OpsData sources in the selected AWS Regions for all AWS accounts in your organization (or in the selected organization units). For more information, see About multiple account and Region resource data syncs in the AWS Systems Manager User Guide.

" + "documentation":"

When you create a resource data sync, if you choose one of the Organizations options, then Systems Manager automatically enables all OpsData sources in the selected Amazon Web Services Regions for all Amazon Web Services accounts in your organization (or in the selected organization units). For more information, see About multiple account and Region resource data syncs in the Amazon Web Services Systems Manager User Guide.

" } }, "documentation":"

Information about the source of the data included in the resource data sync.

" @@ -13731,7 +13733,7 @@ "members":{ "SourceType":{ "shape":"ResourceDataSyncSourceType", - "documentation":"

The type of data source for the resource data sync. SourceType is either AwsOrganizations (if an organization is present in AWS Organizations) or singleAccountMultiRegions.

" + "documentation":"

The type of data source for the resource data sync. SourceType is either AwsOrganizations (if an organization is present in Organizations) or singleAccountMultiRegions.

" }, "AwsOrganizationsSource":{ "shape":"ResourceDataSyncAwsOrganizationsSource", @@ -13739,22 +13741,22 @@ }, "SourceRegions":{ "shape":"ResourceDataSyncSourceRegionList", - "documentation":"

The SyncSource AWS Regions included in the resource data sync.

" + "documentation":"

The SyncSource Amazon Web Services Regions included in the resource data sync.

" }, "IncludeFutureRegions":{ "shape":"ResourceDataSyncIncludeFutureRegions", - "documentation":"

Whether to automatically synchronize and aggregate data from new AWS Regions when those Regions come online.

" + "documentation":"

Whether to automatically synchronize and aggregate data from new Amazon Web Services Regions when those Regions come online.

" }, "State":{ "shape":"ResourceDataSyncState", - "documentation":"

The data type name for including resource data sync state. There are four sync states:

OrganizationNotExists: Your organization doesn't exist.

NoPermissions: The system can't locate the service-linked role. This role is automatically created when a user creates a resource data sync in Explorer.

InvalidOrganizationalUnit: You specified or selected an invalid unit in the resource data sync configuration.

TrustedAccessDisabled: You disabled Systems Manager access in the organization in AWS Organizations.

" + "documentation":"

The data type name for including resource data sync state. There are four sync states:

OrganizationNotExists: Your organization doesn't exist.

NoPermissions: The system can't locate the service-linked role. This role is automatically created when a user creates a resource data sync in Explorer.

InvalidOrganizationalUnit: You specified or selected an invalid unit in the resource data sync configuration.

TrustedAccessDisabled: You disabled Systems Manager access in the organization in Organizations.

" }, "EnableAllOpsDataSources":{ "shape":"ResourceDataSyncEnableAllOpsDataSources", - "documentation":"

When you create a resource data sync, if you choose one of the AWS Organizations options, then Systems Manager automatically enables all OpsData sources in the selected AWS Regions for all AWS accounts in your organization (or in the selected organization units). For more information, see About multiple account and Region resource data syncs in the AWS Systems Manager User Guide.

" + "documentation":"

When you create a resource data sync, if you choose one of the Organizations options, then Systems Manager automatically enables all OpsData sources in the selected Amazon Web Services Regions for all Amazon Web Services accounts in your organization (or in the selected organization units). For more information, see About multiple account and Region resource data syncs in the Amazon Web Services Systems Manager User Guide.

" } }, - "documentation":"

The data type name for including resource data sync state. There are four sync states:

OrganizationNotExists (Your organization doesn't exist)

NoPermissions (The system can't locate the service-linked role. This role is automatically created when a user creates a resource data sync in Explorer.)

InvalidOrganizationalUnit (You specified or selected an invalid unit in the resource data sync configuration.)

TrustedAccessDisabled (You disabled Systems Manager access in the organization in AWS Organizations.)

" + "documentation":"

The data type name for including resource data sync state. There are four sync states:

OrganizationNotExists (Your organization doesn't exist)

NoPermissions (The system can't locate the service-linked role. This role is automatically created when a user creates a resource data sync in Amazon Web Services Systems Manager Explorer.)

InvalidOrganizationalUnit (You specified or selected an invalid unit in the resource data sync configuration.)

TrustedAccessDisabled (You disabled Systems Manager access in the organization in Organizations.)

" }, "ResourceDataSyncState":{ "type":"string", @@ -13780,7 +13782,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Error returned when the caller has exceeded the default resource quotas. For example, too many maintenance windows or patch baselines have been created.

For information about resource quotas in Systems Manager, see Systems Manager service quotas in the AWS General Reference.

", + "documentation":"

Error returned when the caller has exceeded the default resource quotas. For example, too many maintenance windows or patch baselines have been created.

For information about resource quotas in Systems Manager, see Systems Manager service quotas in the Amazon Web Services General Reference.

", "exception":true }, "ResourceType":{ @@ -13810,7 +13812,7 @@ "members":{ "TypeName":{ "shape":"InventoryItemTypeName", - "documentation":"

Name of the inventory item type. Valid value: AWS:InstanceInformation. Default Value: AWS:InstanceInformation.

" + "documentation":"

Name of the inventory item type. Valid value: AWS:InstanceInformation. Default Value: AWS:InstanceInformation.

" } }, "documentation":"

The inventory item result attribute.

" @@ -13844,7 +13846,7 @@ }, "StreamUrl":{ "shape":"StreamUrl", - "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager service endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" + "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an Amazon Web Services Region supported by Amazon Web Services Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager service endpoints in the Amazon Web Services General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" } } }, @@ -13891,11 +13893,11 @@ "members":{ "DocumentName":{ "shape":"DocumentARN", - "documentation":"

The name of the Automation runbook (Automation document) used in a runbook workflow.

" + "documentation":"

The name of the Automation runbook used in a runbook workflow.

" }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The version of the Automation runbook (Automation document) used in a runbook workflow.

", + "documentation":"

The version of the Automation runbook used in a runbook workflow.

", "box":true }, "Parameters":{ @@ -13908,7 +13910,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

A key-value mapping to target resources that the Runbook operation performs tasks on. Required if you specify TargetParameterName.

" + "documentation":"

A key-value mapping to target resources that the runbook operation performs tasks on. Required if you specify TargetParameterName.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", @@ -13920,11 +13922,11 @@ }, "TargetLocations":{ "shape":"TargetLocations", - "documentation":"

Information about the AWS Regions and accounts targeted by the current Runbook operation.

", + "documentation":"

Information about the Amazon Web Services Regions and Amazon Web Services accounts targeted by the current Runbook operation.

", "box":true } }, - "documentation":"

Information about an Automation runbook (Automation document) used in a runbook workflow in Change Manager.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" + "documentation":"

Information about an Automation runbook used in a runbook workflow in Change Manager.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" }, "Runbooks":{ "type":"list", @@ -13946,7 +13948,7 @@ "members":{ "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Region of the S3 bucket.

" + "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Amazon Web Services Systems Manager automatically determines the Region of the S3 bucket.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", @@ -13967,7 +13969,7 @@ "documentation":"

A URL for an S3 bucket where you want to store the results of this request.

" } }, - "documentation":"

A URL for the S3 bucket where you want to store the results of this request.

" + "documentation":"

A URL for the Amazon Simple Storage Service (Amazon S3) bucket where you want to store the results of this request.

" }, "S3Region":{ "type":"string", @@ -14033,19 +14035,19 @@ "members":{ "InstanceIds":{ "shape":"InstanceIdList", - "documentation":"

The IDs of the instances where the command should run. Specifying instance IDs is most useful when you are targeting a limited number of instances, though you can specify up to 50 IDs.

To target a larger number of instances, or if you prefer not to list individual instance IDs, we recommend using the Targets option instead. Using Targets, which accepts tag key-value pairs to identify the instances to send commands to, you can a send command to tens, hundreds, or thousands of instances at once.

For more information about how to use targets, see Using targets and rate controls to send commands to a fleet in the AWS Systems Manager User Guide.

" + "documentation":"

The IDs of the instances where the command should run. Specifying instance IDs is most useful when you are targeting a limited number of instances, though you can specify up to 50 IDs.

To target a larger number of instances, or if you prefer not to list individual instance IDs, we recommend using the Targets option instead. Using Targets, which accepts tag key-value pairs to identify the instances to send commands to, you can send a command to tens, hundreds, or thousands of instances at once.

For more information about how to use targets, see Using targets and rate controls to send commands to a fleet in the Amazon Web Services Systems Manager User Guide.

" }, "Targets":{ "shape":"Targets", - "documentation":"

An array of search criteria that targets instances using a Key,Value combination that you specify. Specifying targets is most useful when you want to send a command to a large number of instances at once. Using Targets, which accepts tag key-value pairs to identify instances, you can send a command to tens, hundreds, or thousands of instances at once.

To send a command to a smaller number of instances, you can use the InstanceIds option instead.

For more information about how to use targets, see Sending commands to a fleet in the AWS Systems Manager User Guide.

" + "documentation":"

An array of search criteria that targets instances using a Key,Value combination that you specify. Specifying targets is most useful when you want to send a command to a large number of instances at once. Using Targets, which accepts tag key-value pairs to identify instances, you can send a command to tens, hundreds, or thousands of instances at once.

To send a command to a smaller number of instances, you can use the InstanceIds option instead.

For more information about how to use targets, see Sending commands to a fleet in the Amazon Web Services Systems Manager User Guide.

" }, "DocumentName":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document to run. This can be a public document or a custom document. To run a shared document belonging to another account, specify the document ARN. For more information about how to use shared documents, see Using shared SSM documents in the AWS Systems Manager User Guide.

" + "documentation":"

The name of the Amazon Web Services Systems Manager document (SSM document) to run. This can be a public document or a custom document. To run a shared document belonging to another account, specify the document ARN. For more information about how to use shared documents, see Using shared SSM documents in the Amazon Web Services Systems Manager User Guide.

" }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The SSM document version to use in the request. You can specify $DEFAULT, $LATEST, or a specific version number. If you run commands by using the AWS CLI, then you must escape the first two options by using a backslash. If you specify a version number, then you don't need to use the backslash. For example:

--document-version \"\\$DEFAULT\"

--document-version \"\\$LATEST\"

--document-version \"3\"

" + "documentation":"

The SSM document version to use in the request. You can specify $DEFAULT, $LATEST, or a specific version number. If you run commands by using the Amazon Web Services Command Line Interface (Amazon Web Services CLI), then you must escape the first two options by using a backslash. If you specify a version number, then you don't need to use the backslash. For example:

--document-version \"\\$DEFAULT\"

--document-version \"\\$LATEST\"

--document-version \"3\"

" }, "DocumentHash":{ "shape":"DocumentHash", @@ -14057,7 +14059,7 @@ }, "TimeoutSeconds":{ "shape":"TimeoutSeconds", - "documentation":"

If this time is reached and the command has not already started running, it will not run.

", + "documentation":"

If this time is reached and the command hasn't already started running, it won't run.

", "box":true }, "Comment":{ @@ -14070,7 +14072,7 @@ }, "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Region of the S3 bucket.

" + "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon Web Services Region of the S3 bucket.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", @@ -14082,15 +14084,15 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

(Optional) The maximum number of instances that are allowed to run the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Using concurrency controls in the AWS Systems Manager User Guide.

" + "documentation":"

(Optional) The maximum number of instances that are allowed to run the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Using concurrency controls in the Amazon Web Services Systems Manager User Guide.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The maximum number of errors allowed without the command failing. When the command fails one more time beyond the value of MaxErrors, the systems stops sending the command to additional targets. You can specify a number like 10 or a percentage like 10%. The default value is 0. For more information about how to use MaxErrors, see Using error controls in the AWS Systems Manager User Guide.

" + "documentation":"

The maximum number of errors allowed without the command failing. When the command fails one more time beyond the value of MaxErrors, the systems stops sending the command to additional targets. You can specify a number like 10 or a percentage like 10%. The default value is 0. For more information about how to use MaxErrors, see Using error controls in the Amazon Web Services Systems Manager User Guide.

" }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for Run Command commands.

" + "documentation":"

The ARN of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for Run Command commands.

" }, "NotificationConfig":{ "shape":"NotificationConfig", @@ -14098,7 +14100,7 @@ }, "CloudWatchOutputConfig":{ "shape":"CloudWatchOutputConfig", - "documentation":"

Enables Systems Manager to send Run Command output to Amazon CloudWatch Logs.

" + "documentation":"

Enables Amazon Web Services Systems Manager to send Run Command output to Amazon CloudWatch Logs. Run Command is a capability of Amazon Web Services Systems Manager.

" } } }, @@ -14137,10 +14139,10 @@ }, "Status":{ "shape":"String", - "documentation":"

The status of the service setting. The value can be Default, Customized or PendingUpdate.

  • Default: The current setting uses a default value provisioned by the AWS service team.

  • Customized: The current setting use a custom value specified by the customer.

  • PendingUpdate: The current setting uses a default or custom value, but a setting change request is pending approval.

" + "documentation":"

The status of the service setting. The value can be Default, Customized or PendingUpdate.

  • Default: The current setting uses a default value provisioned by the Amazon Web Services service team.

  • Customized: The current setting uses a custom value specified by the customer.

  • PendingUpdate: The current setting uses a default or custom value, but a setting change request is pending approval.

" } }, - "documentation":"

The service setting data structure.

ServiceSetting is an account-level setting for an AWS service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an AWS service charges money to the account based on feature or service usage, then the AWS service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. AWS services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the UpdateServiceSetting API action to change the default setting. Or, use the ResetServiceSetting to change the value back to the original value defined by the AWS service team.

" + "documentation":"

The service setting data structure.

ServiceSetting is an account-level setting for an Amazon Web Services service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an Amazon Web Services service charges money to the account based on feature or service usage, then the Amazon Web Services service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.

Services map a SettingId object to a setting value. Amazon Web Services services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the UpdateServiceSetting API operation to change the default setting. Or, use the ResetServiceSetting to change the value back to the original value defined by the Amazon Web Services service team.

" }, "ServiceSettingId":{ "type":"string", @@ -14152,7 +14154,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified service setting was not found. Either the service name or the setting has not been provisioned by the AWS service team.

", + "documentation":"

The specified service setting wasn't found. Either the service name or the setting hasn't been provisioned by the Amazon Web Services service team.

", "exception":true }, "ServiceSettingValue":{ @@ -14189,7 +14191,7 @@ }, "Owner":{ "shape":"SessionOwner", - "documentation":"

The ID of the AWS user account that started the session.

" + "documentation":"

The ID of the Amazon Web Services user account that started the session.

" }, "Details":{ "shape":"SessionDetails", @@ -14220,7 +14222,7 @@ }, "value":{ "shape":"SessionFilterValue", - "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.

  • Target: Specify an instance to which session connections have been made.

  • Owner: Specify an AWS user account to see a list of sessions started by that user.

  • Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include:

    • Connected

    • Connecting

    • Disconnected

    • Terminated

    • Terminating

    • Failed

  • SessionId: Specify a session ID to return details about the session.

" + "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.

  • Target: Specify an instance to which session connections have been made.

  • Owner: Specify an Amazon Web Services user account to see a list of sessions started by that user.

  • Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include:

    • Connected

    • Connecting

    • Disconnected

    • Terminated

    • Terminating

    • Failed

  • SessionId: Specify a session ID to return details about the session.

" } }, "documentation":"

Describes a filter for Session Manager information.

" @@ -14414,16 +14416,16 @@ "members":{ "DocumentName":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document to run. This can be a public document or a custom document. To run a shared document belonging to another account, specify the document ARN. For more information about how to use shared documents, see Using shared SSM documents in the AWS Systems Manager User Guide.

" + "documentation":"

The name of the SSM document to run. This can be a public document or a custom document. To run a shared document belonging to another account, specify the document ARN. For more information about how to use shared documents, see Using shared SSM documents in the Amazon Web Services Systems Manager User Guide.

" }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The version of the Automation document to use for this execution.

", + "documentation":"

The version of the Automation runbook to use for this execution.

", "box":true }, "Parameters":{ "shape":"AutomationParameterMap", - "documentation":"

A key-value map of execution parameters, which match the declared parameters in the Automation document.

" + "documentation":"

A key-value map of execution parameters, which match the declared parameters in the Automation runbook.

" }, "ClientToken":{ "shape":"IdempotencyToken", @@ -14443,11 +14445,11 @@ }, "TargetMaps":{ "shape":"TargetMaps", - "documentation":"

A key-value mapping of document parameters to target resources. Both Targets and TargetMaps cannot be specified together.

" + "documentation":"

A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10.

" + "documentation":"

The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10.

" }, "MaxErrors":{ "shape":"MaxErrors", @@ -14455,12 +14457,12 @@ }, "TargetLocations":{ "shape":"TargetLocations", - "documentation":"

A location is a combination of AWS Regions and/or AWS accounts where you want to run the Automation. Use this action to start an Automation in multiple Regions and multiple accounts. For more information, see Running Automation workflows in multiple AWS Regions and accounts in the AWS Systems Manager User Guide.

", + "documentation":"

A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and Amazon Web Services accounts in the Amazon Web Services Systems Manager User Guide.

", "box":true }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key name/value pairs:

  • Key=environment,Value=test

  • Key=OS,Value=Windows

To add tags to an existing patch baseline, use the AddTagsToResource action.

" + "documentation":"

Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs:

  • Key=environment,Value=test

  • Key=OS,Value=Windows

To add tags to an existing patch baseline, use the AddTagsToResource operation.

" } } }, @@ -14507,11 +14509,11 @@ }, "Runbooks":{ "shape":"Runbooks", - "documentation":"

Information about the Automation runbooks (Automation documents) that are run during the runbook workflow.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" + "documentation":"

Information about the Automation runbooks that are run during the runbook workflow.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target AWS Region. In this case, you could specify the following key-value pairs:

  • Key=Environment,Value=Production

  • Key=Region,Value=us-east-2

" + "documentation":"

Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs:

  • Key=Environment,Value=Production

  • Key=Region,Value=us-east-2

" }, "ScheduledEndTime":{ "shape":"DateTime", @@ -14563,7 +14565,7 @@ }, "StreamUrl":{ "shape":"StreamUrl", - "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager service endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" + "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an Amazon Web Services Region supported by Amazon Web Services Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager service endpoints in the Amazon Web Services General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" } } }, @@ -14607,20 +14609,20 @@ }, "OnFailure":{ "shape":"String", - "documentation":"

The action to take if the step fails. The default value is Abort.

" + "documentation":"

The action to take if the step fails. The default value is Abort.

" }, "MaxAttempts":{ "shape":"Integer", - "documentation":"

The maximum number of tries to run the action of the step. The default value is 1.

", + "documentation":"

The maximum number of tries to run the action of the step. The default value is 1.

", "box":true }, "ExecutionStartTime":{ "shape":"DateTime", - "documentation":"

If a step has begun execution, this contains the time the step started. If the step is in Pending status, this field is not populated.

" + "documentation":"

If a step has begun execution, this contains the time the step started. If the step is in Pending status, this field isn't populated.

" }, "ExecutionEndTime":{ "shape":"DateTime", - "documentation":"

If a step has finished execution, this contains the time the execution ended. If the step has not yet concluded, this field is not populated.

" + "documentation":"

If a step has finished execution, this contains the time the execution ended. If the step hasn't yet concluded, this field isn't populated.

" }, "StepStatus":{ "shape":"AutomationExecutionStatus", @@ -14684,7 +14686,7 @@ }, "TargetLocation":{ "shape":"TargetLocation", - "documentation":"

The combination of AWS Regions and accounts targeted by the current Automation execution.

", + "documentation":"

The combination of Amazon Web Services Regions and Amazon Web Services accounts targeted by the current Automation execution.

", "box":true } }, @@ -14800,7 +14802,7 @@ "documentation":"

The value of the tag.

" } }, - "documentation":"

Metadata that you assign to your AWS resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. In Systems Manager, you can apply tags to documents, managed instances, maintenance windows, Parameter Store parameters, and patch baselines.

" + "documentation":"

Metadata that you assign to your Amazon Web Services resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. In Amazon Web Services Systems Manager, you can apply tags to Systems Manager documents (SSM documents), managed instances, maintenance windows, parameters, patch baselines, OpsItems, and OpsMetadata.

" }, "TagKey":{ "type":"string", @@ -14828,10 +14830,10 @@ }, "Values":{ "shape":"TargetValues", - "documentation":"

User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to run a command on instances that include EC2 tags of ServerRole,WebServer.

Depending on the type of Target, the maximum number of values for a Key might be lower than the global maximum of 50.

" + "documentation":"

User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to run a command on instances that include EC2 tags of ServerRole,WebServer.

Depending on the type of target, the maximum number of values for a key might be lower than the global maximum of 50.

" } }, - "documentation":"

An array of search criteria that targets instances using a Key,Value combination that you specify.

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, AWS Lambda, and AWS Step Functions). For more information about running tasks that do not specify targets, see Registering maintenance window tasks without targets in the AWS Systems Manager User Guide.

Supported formats include the following.

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

  • Key=tag-key,Values=my-tag-key-1,my-tag-key-2

  • Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=resource-group-name

  • Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

  • Automation targets only: Key=ResourceGroup;Values=resource-group-name

For example:

  • Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE

  • Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3

  • Key=tag-key,Values=Name,Instance-Type,CostCenter

  • Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=ProductionResourceGroup

    This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.

  • Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

    This example demonstrates how to target only EC2 instances and VPCs in your maintenance window.

  • Automation targets only: Key=ResourceGroup,Values=MyResourceGroup

  • State Manager association targets only: Key=InstanceIds,Values=*

    This example demonstrates how to target all managed instances in the AWS Region where the association was created.

For more information about how to send commands that target instances using Key,Value parameters, see Targeting multiple instances in the AWS Systems Manager User Guide.

" + "documentation":"

An array of search criteria that targets instances using a key-value pair that you specify.

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.

Supported formats include the following.

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

  • Key=tag-key,Values=my-tag-key-1,my-tag-key-2

  • Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=resource-group-name

  • Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

  • Automation targets only: Key=ResourceGroup;Values=resource-group-name

For example:

  • Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE

  • Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3

  • Key=tag-key,Values=Name,Instance-Type,CostCenter

  • Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=ProductionResourceGroup

    This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.

  • Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

    This example demonstrates how to target only Amazon Elastic Compute Cloud (Amazon EC2) instances and VPCs in your maintenance window.

  • Automation targets only: Key=ResourceGroup,Values=MyResourceGroup

  • State Manager association targets only: Key=InstanceIds,Values=*

    This example demonstrates how to target all managed instances in the Amazon Web Services Region where the association was created.

For more information about how to send commands that target instances using Key,Value parameters, see Targeting multiple instances in the Amazon Web Services Systems Manager User Guide.

" }, "TargetCount":{"type":"integer"}, "TargetInUseException":{ @@ -14853,15 +14855,15 @@ "members":{ "Accounts":{ "shape":"Accounts", - "documentation":"

The AWS accounts targeted by the current Automation execution.

" + "documentation":"

The Amazon Web Services accounts targeted by the current Automation execution.

" }, "Regions":{ "shape":"Regions", - "documentation":"

The AWS Regions targeted by the current Automation execution.

" + "documentation":"

The Amazon Web Services Regions targeted by the current Automation execution.

" }, "TargetLocationMaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of AWS accounts and AWS regions allowed to run the Automation concurrently.

", + "documentation":"

The maximum number of Amazon Web Services Regions and Amazon Web Services accounts allowed to run the Automation concurrently.

", "box":true }, "TargetLocationMaxErrors":{ @@ -14875,7 +14877,7 @@ "box":true } }, - "documentation":"

The combination of AWS Regions and accounts targeted by the current Automation execution.

" + "documentation":"

The combination of Amazon Web Services Regions and Amazon Web Services accounts targeted by the current Automation execution.

" }, "TargetLocations":{ "type":"list", @@ -14917,7 +14919,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified target instance for the session is not fully configured for use with Session Manager. For more information, see Getting started with Session Manager in the AWS Systems Manager User Guide. This error is also returned if you attempt to start a session on an instance that is located in a different account or Region

", + "documentation":"

The specified target instance for the session isn't fully configured for use with Session Manager. For more information, see Getting started with Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you attempt to start a session on an instance that is located in a different account or Region.

", "exception":true }, "TargetParameterList":{ @@ -15009,11 +15011,11 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The parameter name of which you want to delete one or more labels.

" + "documentation":"

The name of the parameter from which you want to delete one or more labels.

" }, "ParameterVersion":{ "shape":"PSParameterVersion", - "documentation":"

The specific version of the parameter which you want to delete one or more labels from. If it is not present, the call will fail.

", + "documentation":"

The specific version of the parameter which you want to delete one or more labels from. If it isn't present, the call will fail.

", "box":true }, "Labels":{ @@ -15031,7 +15033,7 @@ }, "InvalidLabels":{ "shape":"ParameterLabelList", - "documentation":"

The labels that are not attached to the given parameter version.

" + "documentation":"

The labels that aren't attached to the given parameter version.

" } } }, @@ -15040,7 +15042,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The calendar entry contained in the specified Systems Manager document is not supported.

", + "documentation":"

The calendar entry contained in the specified SSM document isn't supported.

", "exception":true }, "UnsupportedFeatureRequiredException":{ @@ -15048,7 +15050,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Microsoft application patching is only available on EC2 instances and advanced instances. To patch Microsoft applications on on-premises servers and VMs, you must enable advanced instances. For more information, see Using the advanced-instances tier in the AWS Systems Manager User Guide.

", + "documentation":"

Patching for applications released by Microsoft is only available on EC2 instances and advanced instances. To patch applications released by Microsoft on on-premises servers and VMs, you must enable advanced instances. For more information, see Enabling the advanced-instances tier in the Amazon Web Services Systems Manager User Guide.

", "exception":true }, "UnsupportedInventoryItemContextException":{ @@ -15057,7 +15059,7 @@ "TypeName":{"shape":"InventoryItemTypeName"}, "Message":{"shape":"String"} }, - "documentation":"

The Context attribute that you specified for the InventoryItem is not allowed for this inventory type. You can only use the Context attribute with inventory types like AWS:ComplianceItem.

", + "documentation":"

The Context attribute that you specified for the InventoryItem isn't allowed for this inventory type. You can only use the Context attribute with inventory types like AWS:ComplianceItem.

", "exception":true }, "UnsupportedInventorySchemaVersionException":{ @@ -15073,7 +15075,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The operating systems you specified is not supported, or the operation is not supported for the operating system.

", + "documentation":"

The operating system you specified isn't supported, or the operation isn't supported for the operating system.

", "exception":true }, "UnsupportedParameterType":{ @@ -15081,7 +15083,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The parameter type is not supported.

", + "documentation":"

The parameter type isn't supported.

", "exception":true }, "UnsupportedPlatformType":{ @@ -15089,7 +15091,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The document does not support the platform type of the given instance ID(s). For example, you sent an document for a Windows instance to a Linux instance.

", + "documentation":"

The document doesn't support the platform type of the given instance ID(s). For example, you sent a document for a Windows instance to a Linux instance.

", "exception":true }, "UpdateAssociationRequest":{ @@ -15102,7 +15104,7 @@ }, "Parameters":{ "shape":"Parameters", - "documentation":"

The parameters you want to update for the association. If you create a parameter using Parameter Store, you can reference the parameter using {{ssm:parameter-name}}

" + "documentation":"

The parameters you want to update for the association. If you create a parameter using Parameter Store, a capability of Amazon Web Services Systems Manager, you can reference the parameter using {{ssm:parameter-name}}.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -15118,7 +15120,7 @@ }, "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the SSM document that contains the configuration information for the instance. You can specify Command or Automation documents.

You can specify AWS-predefined documents, documents you created, or a document that is shared with you from another account.

For SSM documents that are shared with you from other AWS accounts, you must specify the complete SSM document ARN, in the following format:

arn:aws:ssm:region:account-id:document/document-name

For example:

arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document

For AWS-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.

" + "documentation":"

The name of the SSM Command document or Automation runbook that contains the configuration information for the instance.

You can specify Amazon Web Services-predefined documents, documents you created, or a document that is shared with you from another account.

For Systems Manager documents (SSM documents) that are shared with you from other Amazon Web Services accounts, you must specify the complete SSM document ARN, in the following format:

arn:aws:ssm:region:account-id:document/document-name

For example:

arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document

For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.

" }, "Targets":{ "shape":"Targets", @@ -15134,15 +15136,15 @@ }, "AutomationTargetParameterName":{ "shape":"AutomationTargetParameterName", - "documentation":"

Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.

" + "documentation":"

Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" + "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" + "documentation":"

The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.

If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.

" }, "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", @@ -15150,19 +15152,19 @@ }, "SyncCompliance":{ "shape":"AssociationSyncCompliance", - "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" }, "ApplyOnlyAtCronInterval":{ "shape":"ApplyOnlyAtCronInterval", - "documentation":"

By default, when you update an association, the system runs it immediately after it is updated and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you update it. This parameter is not supported for rate expressions.

Also, if you specified this option when you created the association, you can reset it. To do so, specify the no-apply-only-at-cron-interval parameter when you update the association from the command line. This parameter forces the association to run immediately after updating it and according to the interval specified.

" + "documentation":"

By default, when you update an association, the system runs it immediately after it is updated and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you update it. This parameter isn't supported for rate expressions.

Also, if you specified this option when you created the association, you can reset it. To do so, specify the no-apply-only-at-cron-interval parameter when you update the association from the command line. This parameter forces the association to run immediately after updating it and according to the interval specified.

" }, "CalendarNames":{ "shape":"CalendarNameOrARNList", - "documentation":"

The names or Amazon Resource Names (ARNs) of the Systems Manager Change Calendar type documents you want to gate your associations under. The associations only run when that Change Calendar is open. For more information, see AWS Systems Manager Change Calendar.

" + "documentation":"

The names or Amazon Resource Names (ARNs) of the Change Calendar type documents you want to gate your associations under. The associations only run when that change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar.

" }, "TargetLocations":{ "shape":"TargetLocations", - "documentation":"

A location is a combination of AWS Regions and AWS accounts where you want to run the association. Use this action to update an association in multiple Regions and multiple accounts.

" + "documentation":"

A location is a combination of Amazon Web Services Regions and Amazon Web Services accounts where you want to run the association. Use this action to update an association in multiple Regions and multiple accounts.

" } } }, @@ -15185,11 +15187,11 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

The name of the Systems Manager document.

" + "documentation":"

The name of the SSM document.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The ID of the instance.

" + "documentation":"

The instance ID.

" }, "AssociationStatus":{ "shape":"AssociationStatus", @@ -15241,15 +15243,15 @@ "members":{ "Name":{ "shape":"DocumentName", - "documentation":"

The name of the document for which a version is to be updated.

" + "documentation":"

The name of the change template for which a version's metadata is to be updated.

" }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The version of a document to update.

" + "documentation":"

The version of a change template in which to update approval metadata.

" }, "DocumentReviews":{ "shape":"DocumentReviews", - "documentation":"

The document review details to update.

" + "documentation":"

The change template review details to update.

" } } }, @@ -15271,19 +15273,19 @@ }, "Attachments":{ "shape":"AttachmentsSourceList", - "documentation":"

A list of key and value pairs that describe attachments to a version of a document.

" + "documentation":"

A list of key-value pairs that describe attachments to a version of a document.

" }, "Name":{ "shape":"DocumentName", - "documentation":"

The name of the Systems Manager document that you want to update.

" + "documentation":"

The name of the SSM document that you want to update.

" }, "DisplayName":{ "shape":"DocumentDisplayName", - "documentation":"

The friendly name of the Systems Manager document that you want to update. This value can differ for each version of the document. If you do not specify a value for this parameter in your request, the existing value is applied to the new document version.

" + "documentation":"

The friendly name of the SSM document that you want to update. This value can differ for each version of the document. If you don't specify a value for this parameter in your request, the existing value is applied to the new document version.

" }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact you are updating with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and cannot be changed.

" + "documentation":"

An optional field specifying the version of the artifact you are updating with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -15330,7 +15332,7 @@ }, "EndDate":{ "shape":"MaintenanceWindowStringDateTime", - "documentation":"

The date and time, in ISO-8601 Extended format, for when you want the maintenance window to become inactive. EndDate allows you to set a date and time in the future when the maintenance window will no longer run.

" + "documentation":"

The date and time, in ISO-8601 Extended format, for when you want the maintenance window to become inactive. EndDate allows you to set a date and time in the future when the maintenance window will no longer run.

" }, "Schedule":{ "shape":"MaintenanceWindowSchedule", @@ -15342,7 +15344,7 @@ }, "ScheduleOffset":{ "shape":"MaintenanceWindowOffset", - "documentation":"

The number of days to wait after the date and time specified by a CRON expression before running the maintenance window.

For example, the following cron expression schedules a maintenance window to run the third Tuesday of every month at 11:30 PM.

cron(30 23 ? * TUE#3 *)

If the schedule offset is 2, the maintenance window won't run until two days later.

", + "documentation":"

The number of days to wait after the date and time specified by a cron expression before running the maintenance window.

For example, the following cron expression schedules a maintenance window to run the third Tuesday of every month at 11:30 PM.

cron(30 23 ? * TUE#3 *)

If the schedule offset is 2, the maintenance window won't run until two days later.

", "box":true }, "Duration":{ @@ -15352,7 +15354,7 @@ }, "Cutoff":{ "shape":"MaintenanceWindowCutoff", - "documentation":"

The number of hours before the end of the maintenance window that Systems Manager stops scheduling new tasks for execution.

", + "documentation":"

The number of hours before the end of the maintenance window that Amazon Web Services Systems Manager stops scheduling new tasks for execution.

", "box":true }, "AllowUnassociatedTargets":{ @@ -15367,7 +15369,7 @@ }, "Replace":{ "shape":"Boolean", - "documentation":"

If True, then all fields that are required by the CreateMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.

", + "documentation":"

If True, then all fields that are required by the CreateMaintenanceWindow operation are also required for this API request. Optional fields that aren't specified are set to null.

", "box":true } } @@ -15389,11 +15391,11 @@ }, "StartDate":{ "shape":"MaintenanceWindowStringDateTime", - "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. The maintenance window will not run before this specified time.

" + "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. The maintenance window won't run before this specified time.

" }, "EndDate":{ "shape":"MaintenanceWindowStringDateTime", - "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become inactive. The maintenance window will not run after this specified time.

" + "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become inactive. The maintenance window won't run after this specified time.

" }, "Schedule":{ "shape":"MaintenanceWindowSchedule", @@ -15405,7 +15407,7 @@ }, "ScheduleOffset":{ "shape":"MaintenanceWindowOffset", - "documentation":"

The number of days to wait to run a maintenance window after the scheduled CRON expression date and time.

", + "documentation":"

The number of days to wait to run a maintenance window after the scheduled cron expression date and time.

", "box":true }, "Duration":{ @@ -15414,7 +15416,7 @@ }, "Cutoff":{ "shape":"MaintenanceWindowCutoff", - "documentation":"

The number of hours before the end of the maintenance window that Systems Manager stops scheduling new tasks for execution.

" + "documentation":"

The number of hours before the end of the maintenance window that Amazon Web Services Systems Manager stops scheduling new tasks for execution.

" }, "AllowUnassociatedTargets":{ "shape":"MaintenanceWindowAllowUnassociatedTargets", @@ -15447,7 +15449,7 @@ }, "OwnerInformation":{ "shape":"OwnerInformation", - "documentation":"

User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this maintenance window.

" + "documentation":"

User-provided value that will be included in any Amazon CloudWatch Events events raised while running tasks for these targets in this maintenance window.

" }, "Name":{ "shape":"MaintenanceWindowName", @@ -15459,7 +15461,7 @@ }, "Replace":{ "shape":"Boolean", - "documentation":"

If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.

", + "documentation":"

If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow operation are also required for this API request. Optional fields that aren't specified are set to null.

", "box":true } } @@ -15510,7 +15512,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets (either instances or tags) to modify. Instances are specified using Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using Key=tag_name,Values=tag_value.

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, AWS Lambda, and AWS Step Functions). For more information about running tasks that do not specify targets, see Registering maintenance window tasks without targets in the AWS Systems Manager User Guide.

" + "documentation":"

The targets (either instances or tags) to modify. Instances are specified using the format Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using the format Key=tag_name,Values=tag_value.

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.

" }, "TaskArn":{ "shape":"MaintenanceWindowTaskArn", @@ -15518,7 +15520,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role for Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

For more information, see the following topics in the in the AWS Systems Manager User Guide:

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

For more information, see the following topics in the Amazon Web Services Systems Manager User Guide:

" }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParameters", @@ -15526,7 +15528,7 @@ }, "TaskInvocationParameters":{ "shape":"MaintenanceWindowTaskInvocationParameters", - "documentation":"

The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty.

When you update a maintenance window task that has options specified in TaskInvocationParameters, you must provide again all the TaskInvocationParameters values that you want to retain. The values you do not specify again are removed. For example, suppose that when you registered a Run Command task, you specified TaskInvocationParameters values for Comment, NotificationConfig, and OutputS3BucketName. If you update the maintenance window task and specify only a different OutputS3BucketName value, the values for Comment and NotificationConfig are removed.

" + "documentation":"

The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty.

When you update a maintenance window task that has options specified in TaskInvocationParameters, you must provide again all the TaskInvocationParameters values that you want to retain. The values you don't specify again are removed. For example, suppose that when you registered a Run Command task, you specified TaskInvocationParameters values for Comment, NotificationConfig, and OutputS3BucketName. If you update the maintenance window task and specify only a different OutputS3BucketName value, the values for Comment and NotificationConfig are removed.

" }, "Priority":{ "shape":"MaintenanceWindowTaskPriority", @@ -15535,15 +15537,15 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The new MaxConcurrency value you want to specify. MaxConcurrency is the number of targets that are allowed to run this task in parallel.

For maintenance window tasks without a target specified, you cannot supply a value for this option. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. This value does not affect the running of your task and can be ignored.

" + "documentation":"

The new MaxConcurrency value you want to specify. MaxConcurrency is the number of targets that are allowed to run this task in parallel.

For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. This value doesn't affect the running of your task and can be ignored.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The new MaxErrors value to specify. MaxErrors is the maximum number of errors that are allowed before the task stops being scheduled.

For maintenance window tasks without a target specified, you cannot supply a value for this option. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. This value does not affect the running of your task and can be ignored.

" + "documentation":"

The new MaxErrors value to specify. MaxErrors is the maximum number of errors that are allowed before the task stops being scheduled.

For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of 1, which may be reported in the response to this command. This value doesn't affect the running of your task and can be ignored.

" }, "LoggingInfo":{ "shape":"LoggingInfo", - "documentation":"

The new logging location in Amazon S3 to specify.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

The new logging location in Amazon S3 to specify.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "Name":{ "shape":"MaintenanceWindowName", @@ -15555,7 +15557,7 @@ }, "Replace":{ "shape":"Boolean", - "documentation":"

If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.

", + "documentation":"

If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow operation are also required for this API request. Optional fields that aren't specified are set to null.

", "box":true } } @@ -15581,7 +15583,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.

" }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParameters", @@ -15597,15 +15599,15 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The updated MaxConcurrency value.

" + "documentation":"

The updated MaxConcurrency value.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The updated MaxErrors value.

" + "documentation":"

The updated MaxErrors value.

" }, "LoggingInfo":{ "shape":"LoggingInfo", - "documentation":"

The updated logging information in Amazon S3.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

The updated logging information in Amazon S3.

LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "Name":{ "shape":"MaintenanceWindowName", @@ -15649,7 +15651,7 @@ }, "OperationalData":{ "shape":"OpsItemOperationalData", - "documentation":"

Add new keys or edit existing key-value pairs of the OperationalData map in the OpsItem object.

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems manually in the AWS Systems Manager User Guide.

" + "documentation":"

Add new keys or edit existing key-value pairs of the OperationalData map in the OpsItem object.

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API operation) can view and search on the specified data. Operational data that isn't searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API operation).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view Amazon Web Services CLI example commands that use these keys, see Creating OpsItems manually in the Amazon Web Services Systems Manager User Guide.

" }, "OperationalDataToDelete":{ "shape":"OpsItemOpsDataKeysList", @@ -15669,7 +15671,7 @@ }, "Status":{ "shape":"OpsItemStatus", - "documentation":"

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the AWS Systems Manager User Guide.

" + "documentation":"

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.

" }, "OpsItemId":{ "shape":"OpsItemId", @@ -15759,7 +15761,7 @@ }, "ApprovedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", @@ -15767,16 +15769,16 @@ }, "ApprovedPatchesEnableNonSecurity":{ "shape":"Boolean", - "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is false. Applies to Linux instances only.

", "box":true }, "RejectedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" }, "RejectedPatchesAction":{ "shape":"PatchAction", - "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list.

  • ALLOW_AS_DEPENDENCY: A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default action if no option is specified.

  • BLOCK: Packages in the RejectedPatches list, and packages that include them as dependencies, are not installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as InstalledRejected.

" + "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list.

  • ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default action if no option is specified.

  • BLOCK : Packages in the RejectedPatches list, and packages that include them as dependencies, aren't installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as InstalledRejected.

" }, "Description":{ "shape":"BaselineDescription", @@ -15788,7 +15790,7 @@ }, "Replace":{ "shape":"Boolean", - "documentation":"

If True, then all fields that are required by the CreatePatchBaseline action are also required for this API request. Optional fields that are not specified are set to null.

", + "documentation":"

If True, then all fields that are required by the CreatePatchBaseline operation are also required for this API request. Optional fields that aren't specified are set to null.

", "box":true } } @@ -15826,7 +15828,7 @@ }, "ApprovedPatchesEnableNonSecurity":{ "shape":"Boolean", - "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is false. Applies to Linux instances only.

", "box":true }, "RejectedPatches":{ @@ -15835,7 +15837,7 @@ }, "RejectedPatchesAction":{ "shape":"PatchAction", - "documentation":"

The action specified to take on patches included in the RejectedPatches list. A patch can be allowed only if it is a dependency of another package, or blocked entirely along with packages that include it as a dependency.

" + "documentation":"

The action specified to take on patches included in the RejectedPatches list. A patch can be allowed only if it is a dependency of another package, or blocked entirely along with packages that include it as a dependency.

" }, "CreatedDate":{ "shape":"DateTime", @@ -15847,7 +15849,7 @@ }, "Description":{ "shape":"BaselineDescription", - "documentation":"

A description of the Patch Baseline.

" + "documentation":"

A description of the patch baseline.

" }, "Sources":{ "shape":"PatchSourceList", @@ -15895,16 +15897,16 @@ }, "SettingValue":{ "shape":"ServiceSettingValue", - "documentation":"

The new value to specify for the service setting. For the /ssm/parameter-store/default-parameter-tier setting ID, the setting value can be one of the following.

  • Standard

  • Advanced

  • Intelligent-Tiering

For the /ssm/parameter-store/high-throughput-enabled, and /ssm/managed-instance/activation-tier setting IDs, the setting value can be true or false.

For the /ssm/automation/customer-script-log-destination setting ID, the setting value can be CloudWatch.

For the /ssm/automation/customer-script-log-group-name setting ID, the setting value can be the name of a CloudWatch Logs log group.

For the /ssm/documents/console/public-sharing-permission setting ID, the setting value can be Enable or Disable.

" + "documentation":"

The new value to specify for the service setting. For the /ssm/parameter-store/default-parameter-tier setting ID, the setting value can be one of the following.

  • Standard

  • Advanced

  • Intelligent-Tiering

For the /ssm/parameter-store/high-throughput-enabled, and /ssm/managed-instance/activation-tier setting IDs, the setting value can be true or false.

For the /ssm/automation/customer-script-log-destination setting ID, the setting value can be CloudWatch.

For the /ssm/automation/customer-script-log-group-name setting ID, the setting value can be the name of an Amazon CloudWatch Logs log group.

For the /ssm/documents/console/public-sharing-permission setting ID, the setting value can be Enable or Disable.

" } }, - "documentation":"

The request body of the UpdateServiceSetting API action.

" + "documentation":"

The request body of the UpdateServiceSetting API operation.

" }, "UpdateServiceSettingResult":{ "type":"structure", "members":{ }, - "documentation":"

The result body of the UpdateServiceSetting API action.

" + "documentation":"

The result body of the UpdateServiceSetting API operation.

" }, "Url":{"type":"string"}, "ValidNextStep":{ @@ -15921,5 +15923,5 @@ "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" } }, - "documentation":"AWS Systems Manager

AWS Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A managed instance is any Amazon Elastic Compute Cloud instance (EC2 instance), or any on-premises server or virtual machine (VM) in your hybrid environment that has been configured for Systems Manager.

This reference is intended to be used with the AWS Systems Manager User Guide.

To get started, verify prerequisites and configure managed instances. For more information, see Setting up AWS Systems Manager in the AWS Systems Manager User Guide.

Related resources

" + "documentation":"

Amazon Web Services Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A managed instance is any Amazon Elastic Compute Cloud instance (EC2 instance), or any on-premises server or virtual machine (VM) in your hybrid environment that has been configured for Systems Manager.

This reference is intended to be used with the Amazon Web Services Systems Manager User Guide.

To get started, verify prerequisites and configure managed instances. For more information, see Setting up Amazon Web Services Systems Manager in the Amazon Web Services Systems Manager User Guide.

Related resources

" } diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index c44c1d840d90..aeeec4d5f732 100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git a/services/ssmcontacts/src/main/resources/codegen-resources/service-2.json b/services/ssmcontacts/src/main/resources/codegen-resources/service-2.json index fa1a9fe527e2..dd8be2218f35 100644 --- a/services/ssmcontacts/src/main/resources/codegen-resources/service-2.json +++ b/services/ssmcontacts/src/main/resources/codegen-resources/service-2.json @@ -496,6 +496,13 @@ "min":6, "pattern":"^[0-9]*$" }, + "AcceptCodeValidation":{ + "type":"string", + "enum":[ + "IGNORE", + "ENFORCE" + ] + }, "AcceptPageRequest":{ "type":"structure", "required":[ @@ -523,6 +530,10 @@ "AcceptCode":{ "shape":"AcceptCode", "documentation":"

The accept code is a 6-digit code used to acknowledge the page.

" + }, + "AcceptCodeValidation":{ + "shape":"AcceptCodeValidation", + "documentation":"

An optional field that Incident Manager uses to ENFORCE AcceptCode validation when acknowledging a page. Acknowledgement can occur by replying to a page, or when entering the AcceptCode in the console. Enforcing AcceptCode validation causes Incident Manager to verify that the code entered by the user matches the code sent by Incident Manager with the page.

Incident Manager can also IGNORE AcceptCode validation. Ignoring AcceptCode validation causes Incident Manager to accept any value entered for the AcceptCode.

" } } }, @@ -1687,7 +1698,7 @@ "members":{ "DurationInMinutes":{ "shape":"StageDurationInMins", - "documentation":"

The time to wait until beginning the next stage.

" + "documentation":"

The time to wait until beginning the next stage. The duration can only be set to 0 if a target is specified.

" }, "Targets":{ "shape":"TargetsList", @@ -1814,7 +1825,7 @@ "TagKeyList":{ "type":"list", "member":{"shape":"TagKey"}, - "max":200, + "max":50, "min":0 }, "TagResourceRequest":{ @@ -2021,5 +2032,5 @@ ] } }, - "documentation":"

" + "documentation":"

AWS Systems Manager Incident Manager is an incident management console designed to help users mitigate and recover from incidents affecting their AWS-hosted applications. An incident is any unplanned interruption or reduction in quality of services.

Incident Manager increases incident resolution by notifying responders of impact, highlighting relevant troubleshooting data, and providing collaboration tools to get services back up and running. To achieve the primary goal of reducing the time-to-resolution of critical incidents, Incident Manager automates response plans and enables responder team escalation.

" } diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index 57a90e1cb68f..23d5cbec354c 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmincidents/src/main/resources/codegen-resources/service-2.json b/services/ssmincidents/src/main/resources/codegen-resources/service-2.json index 536cc7e0754d..3b9466b154f9 100644 --- a/services/ssmincidents/src/main/resources/codegen-resources/service-2.json +++ b/services/ssmincidents/src/main/resources/codegen-resources/service-2.json @@ -784,7 +784,6 @@ "CreateTimelineEventInput":{ "type":"structure", "required":[ - "clientToken", "eventData", "eventTime", "eventType", @@ -798,7 +797,7 @@ }, "eventData":{ "shape":"EventData", - "documentation":"

A short description of the event.

" + "documentation":"

A valid JSON string. There is no other schema imposed. A short description of the event.

" }, "eventTime":{ "shape":"Timestamp", @@ -1241,7 +1240,7 @@ }, "notificationTargets":{ "shape":"NotificationTargetSet", - "documentation":"

The SNS targets that AWS Chatbot uses to notify the chat channels and perform actions on the incident record.

" + "documentation":"

The SNS targets that are notified when updates are made to an incident.

" }, "resolvedTime":{ "shape":"Timestamp", @@ -1370,7 +1369,7 @@ }, "notificationTargets":{ "shape":"NotificationTargetSet", - "documentation":"

The SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel using the SNS topics.

" + "documentation":"

The SNS targets that are notified when updates are made to an incident.

" }, "summary":{ "shape":"IncidentSummary", @@ -1664,7 +1663,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the SNS topic.

" } }, - "documentation":"

The SNS topic that's used by AWS Chatbot to notify the incidents chat channel.

", + "documentation":"

The SNS targets that are notified when updates are made to an incident.

", "union":true }, "NotificationTargetSet":{ @@ -1830,6 +1829,10 @@ "status" ], "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the replication set.

" + }, "createdBy":{ "shape":"Arn", "documentation":"

Details about who created the replication set.

" @@ -2391,7 +2394,7 @@ }, "notificationTargets":{ "shape":"NotificationTargetSet", - "documentation":"

The SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel using the SNS topics.

Using multiple SNS topics creates redundancy in the case that a Region is down during the incident.

" + "documentation":"

The SNS targets that are notified when updates are made to an incident.

Using multiple SNS topics creates redundancy in the case that a Region is down during the incident.

" }, "status":{ "shape":"IncidentRecordStatus", @@ -2501,7 +2504,7 @@ }, "chatChannel":{ "shape":"ChatChannel", - "documentation":"

The AWS Chatbot chat channel used for collaboration during an incident.

" + "documentation":"

The AWS Chatbot chat channel used for collaboration during an incident.

Use the empty structure to remove the chat channel from the response plan.

" }, "clientToken":{ "shape":"ClientToken", @@ -2526,7 +2529,7 @@ }, "incidentTemplateNotificationTargets":{ "shape":"NotificationTargetSet", - "documentation":"

The SNS targets that AWS Chatbot uses to notify the chat channels and perform actions on the incident record.

" + "documentation":"

The SNS targets that are notified when updates are made to an incident.

" }, "incidentTemplateSummary":{ "shape":"IncidentSummary", @@ -2546,7 +2549,6 @@ "UpdateTimelineEventInput":{ "type":"structure", "required":[ - "clientToken", "eventId", "incidentRecordArn" ], diff --git a/services/sso/pom.xml b/services/sso/pom.xml index 037f1cb47663..d7d589a149c0 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sso AWS Java SDK :: Services :: SSO @@ -56,6 +56,11 @@ aws-json-protocol ${awsjavasdk.version} + + software.amazon.awssdk + json-utils + ${awsjavasdk.version} + software.amazon.awssdk profiles diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java index 242208af9356..d94065db2522 100644 --- a/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java @@ -17,7 +17,6 @@ import static java.time.temporal.ChronoUnit.MINUTES; -import com.fasterxml.jackson.databind.JsonNode; import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; @@ -25,10 +24,12 @@ import java.nio.file.Path; import java.time.Instant; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.util.json.JacksonUtils; +import software.amazon.awssdk.protocols.jsoncore.JsonNode; +import software.amazon.awssdk.protocols.jsoncore.JsonNodeParser; import software.amazon.awssdk.services.sso.auth.ExpiredTokenException; import software.amazon.awssdk.services.sso.auth.SsoCredentialsProvider; import software.amazon.awssdk.utils.IoUtils; +import software.amazon.awssdk.utils.Validate; /** * Resolve the access token from the cached token file. 
If the token has expired then throw out an exception to ask the users to @@ -37,8 +38,9 @@ */ @SdkInternalApi public final class SsoAccessTokenProvider { + private static final JsonNodeParser PARSER = JsonNodeParser.builder().removeErrorLocations(true).build(); - private Path cachedTokenFilePath; + private final Path cachedTokenFilePath; public SsoAccessTokenProvider(Path cachedTokenFilePath) { this.cachedTokenFilePath = cachedTokenFilePath; @@ -53,18 +55,22 @@ public String resolveAccessToken() { } private String getTokenFromJson(String json) { - JsonNode jsonNode = JacksonUtils.sensitiveJsonNodeOf(json); + JsonNode jsonNode = PARSER.parse(json); + String expiration = jsonNode.field("expiresAt").map(JsonNode::text).orElse(null); - if (validateToken(jsonNode.get("expiresAt").asText())) { + Validate.notNull(expiration, + "The SSO session's expiration time could not be determined. Please refresh your SSO session."); + + if (tokenIsInvalid(expiration)) { throw ExpiredTokenException.builder().message("The SSO session associated with this profile has expired or is" + " otherwise invalid. 
To refresh this SSO session run aws sso" + " login with the corresponding profile.").build(); } - return jsonNode.get("accessToken").asText(); + return jsonNode.asObject().get("accessToken").text(); } - private boolean validateToken(String expirationTime) { + private boolean tokenIsInvalid(String expirationTime) { return Instant.now().isAfter(Instant.parse(expirationTime).minus(15, MINUTES)); } diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java index ee08c541eaef..d6fd7565c8f8 100644 --- a/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java @@ -39,7 +39,7 @@ public void createSsoCredentialsProvider_SsoAccountIdMissing_throwException() { .type(ProfileFile.Type.CONFIGURATION) .build(); assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { - assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + assertThatThrownBy(() -> new ProfileCredentialsUtils(profiles, profile, profiles::profile).credentialsProvider()) .hasMessageContaining("Profile property 'sso_account_id' was not configured"); }); } @@ -55,7 +55,7 @@ public void createSsoCredentialsProvider_SsoRegionMissing_throwException() { .type(ProfileFile.Type.CONFIGURATION) .build(); assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { - assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + assertThatThrownBy(() -> new ProfileCredentialsUtils(profiles, profile, profiles::profile).credentialsProvider()) .hasMessageContaining("Profile property 'sso_region' was not configured"); }); } @@ -71,7 +71,7 @@ public void createSsoCredentialsProvider_SsoRoleNameMissing_throwException() { .type(ProfileFile.Type.CONFIGURATION) .build(); 
assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { - assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + assertThatThrownBy(() -> new ProfileCredentialsUtils(profiles, profile, profiles::profile).credentialsProvider()) .hasMessageContaining("Profile property 'sso_role_name' was not configured"); }); } @@ -87,7 +87,7 @@ public void createSsoCredentialsProvider_SsoStartUrlMissing_throwException() { .type(ProfileFile.Type.CONFIGURATION) .build(); assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { - assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + assertThatThrownBy(() -> new ProfileCredentialsUtils(profiles, profile, profiles::profile).credentialsProvider()) .hasMessageContaining("Profile property 'sso_start_url' was not configured"); }); } diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index 434385e25a5b..875e3e69fd07 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssoadmin/src/main/resources/codegen-resources/service-2.json b/services/ssoadmin/src/main/resources/codegen-resources/service-2.json index 91dddf65ed2c..20b14cf3e896 100644 --- a/services/ssoadmin/src/main/resources/codegen-resources/service-2.json +++ b/services/ssoadmin/src/main/resources/codegen-resources/service-2.json @@ -31,7 +31,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ConflictException"} ], - "documentation":"

Attaches an IAM managed policy ARN to a permission set.

If the permission set is already referenced by one or more account assignments, you will need to call ProvisionPermissionSet after this action to apply the corresponding IAM policy updates to all assigned accounts.

" + "documentation":"

Attaches an IAM managed policy ARN to a permission set.

If the permission set is already referenced by one or more account assignments, you will need to call ProvisionPermissionSet after this operation. Calling ProvisionPermissionSet applies the corresponding IAM policy updates to all assigned accounts.

" }, "CreateAccountAssignment":{ "name":"CreateAccountAssignment", @@ -50,7 +50,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ConflictException"} ], - "documentation":"

Assigns access to a principal for a specified AWS account using a specified permission set.

The term principal here refers to a user or group that is defined in AWS SSO.

As part of a successful CreateAccountAssignment call, the specified permission set will automatically be provisioned to the account in the form of an IAM policy attached to the SSO-created IAM role. If the permission set is subsequently updated, the corresponding IAM policies attached to roles in your accounts will not be updated automatically. In this case, you will need to call ProvisionPermissionSet to make these updates.

" + "documentation":"

Assigns access to a principal for a specified Amazon Web Services account using a specified permission set.

The term principal here refers to a user or group that is defined in Amazon Web Services SSO.

As part of a successful CreateAccountAssignment call, the specified permission set will automatically be provisioned to the account in the form of an IAM policy. That policy is attached to the SSO-created IAM role. If the permission set is subsequently updated, the corresponding IAM policies attached to roles in your accounts will not be updated automatically. In this case, you must call ProvisionPermissionSet to make these updates.

" }, "CreateInstanceAccessControlAttributeConfiguration":{ "name":"CreateInstanceAccessControlAttributeConfiguration", @@ -68,7 +68,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Enables the attributes-based access control (ABAC) feature for the specified AWS SSO instance. You can also specify new attributes to add to your ABAC configuration during the enabling process. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

" + "documentation":"

Enables the attributes-based access control (ABAC) feature for the specified Amazon Web Services SSO instance. You can also specify new attributes to add to your ABAC configuration during the enabling process. For more information about ABAC, see Attribute-Based Access Control in the Amazon Web Services SSO User Guide.

" }, "CreatePermissionSet":{ "name":"CreatePermissionSet", @@ -87,7 +87,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ConflictException"} ], - "documentation":"

Creates a permission set within a specified SSO instance.

To grant users and groups access to AWS account resources, use CreateAccountAssignment .

" + "documentation":"

Creates a permission set within a specified SSO instance.

To grant users and groups access to Amazon Web Services account resources, use CreateAccountAssignment .

" }, "DeleteAccountAssignment":{ "name":"DeleteAccountAssignment", @@ -105,7 +105,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ConflictException"} ], - "documentation":"

Deletes a principal's access from a specified AWS account using a specified permission set.

" + "documentation":"

Deletes a principal's access from a specified Amazon Web Services account using a specified permission set.

" }, "DeleteInlinePolicyFromPermissionSet":{ "name":"DeleteInlinePolicyFromPermissionSet", @@ -141,7 +141,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Disables the attributes-based access control (ABAC) feature for the specified AWS SSO instance and deletes all of the attribute mappings that have been configured. Once deleted, any attributes that are received from an identity source and any custom attributes you have previously configured will not be passed. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

" + "documentation":"

Disables the attributes-based access control (ABAC) feature for the specified Amazon Web Services SSO instance and deletes all of the attribute mappings that have been configured. Once deleted, any attributes that are received from an identity source and any custom attributes you have previously configured will not be passed. For more information about ABAC, see Attribute-Based Access Control in the Amazon Web Services SSO User Guide.

" }, "DeletePermissionSet":{ "name":"DeletePermissionSet", @@ -210,7 +210,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the list of AWS SSO identity store attributes that have been configured to work with attributes-based access control (ABAC) for the specified AWS SSO instance. This will not return attributes configured and sent by an external identity provider. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

" + "documentation":"

Returns the list of Amazon Web Services SSO identity store attributes that have been configured to work with attributes-based access control (ABAC) for the specified Amazon Web Services SSO instance. This will not return attributes configured and sent by an external identity provider. For more information about ABAC, see Attribute-Based Access Control in the Amazon Web Services SSO User Guide.

" }, "DescribePermissionSet":{ "name":"DescribePermissionSet", @@ -296,7 +296,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the status of the AWS account assignment creation requests for a specified SSO instance.

" + "documentation":"

Lists the status of the Amazon Web Services account assignment creation requests for a specified SSO instance.

" }, "ListAccountAssignmentDeletionStatus":{ "name":"ListAccountAssignmentDeletionStatus", @@ -313,7 +313,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the status of the AWS account assignment deletion requests for a specified SSO instance.

" + "documentation":"

Lists the status of the Amazon Web Services account assignment deletion requests for a specified SSO instance.

" }, "ListAccountAssignments":{ "name":"ListAccountAssignments", @@ -330,7 +330,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the assignee of the specified AWS account with the specified permission set.

" + "documentation":"

Lists the assignee of the specified Amazon Web Services account with the specified permission set.

" }, "ListAccountsForProvisionedPermissionSet":{ "name":"ListAccountsForProvisionedPermissionSet", @@ -347,7 +347,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists all the AWS accounts where the specified permission set is provisioned.

" + "documentation":"

Lists all the Amazon Web Services accounts where the specified permission set is provisioned.

" }, "ListInstances":{ "name":"ListInstances", @@ -431,7 +431,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists all the permission sets that are provisioned to a specified AWS account.

" + "documentation":"

Lists all the permission sets that are provisioned to a specified Amazon Web Services account.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -540,7 +540,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Updates the AWS SSO identity store attributes to use with the AWS SSO instance for attributes-based access control (ABAC). When using an external identity provider as an identity source, you can pass attributes through the SAML assertion as an alternative to configuring attributes from the AWS SSO identity store. If a SAML assertion passes any of these attributes, AWS SSO will replace the attribute value with the value from the AWS SSO identity store. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

" + "documentation":"

Updates the Amazon Web Services SSO identity store attributes that you can use with the Amazon Web Services SSO instance for attributes-based access control (ABAC). When using an external identity provider as an identity source, you can pass attributes through the SAML assertion as an alternative to configuring attributes from the Amazon Web Services SSO identity store. If a SAML assertion passes any of these attributes, Amazon Web Services SSO replaces the attribute value with the value from the Amazon Web Services SSO identity store. For more information about ABAC, see Attribute-Based Access Control in the Amazon Web Services SSO User Guide.

" }, "UpdatePermissionSet":{ "name":"UpdatePermissionSet", @@ -571,14 +571,14 @@ "members":{ "Key":{ "shape":"AccessControlAttributeKey", - "documentation":"

The name of the attribute associated with your identities in your identity source. This is used to map a specified attribute in your identity source with an attribute in AWS SSO.

" + "documentation":"

The name of the attribute associated with your identities in your identity source. This is used to map a specified attribute in your identity source with an attribute in Amazon Web Services SSO.

" }, "Value":{ "shape":"AccessControlAttributeValue", "documentation":"

The value used for mapping a specified attribute to an identity source.

" } }, - "documentation":"

These are AWS SSO identity store attributes that you can configure for use in attributes-based access control (ABAC). You can create permission policies that determine who can access your AWS resources based upon the configured attribute value(s). When you enable ABAC and specify AccessControlAttributes, AWS SSO passes the attribute(s) value of the authenticated user into IAM for use in policy evaluation.

" + "documentation":"

These are Amazon Web Services SSO identity store attributes that you can configure for use in attributes-based access control (ABAC). You can create permissions policies that determine who can access your Amazon Web Services resources based upon the configured attribute values. When you enable ABAC and specify AccessControlAttributes, Amazon Web Services SSO passes the attribute values of the authenticated user into IAM for use in policy evaluation.

" }, "AccessControlAttributeKey":{ "type":"string", @@ -598,7 +598,7 @@ "members":{ "Source":{ "shape":"AccessControlAttributeValueSourceList", - "documentation":"

The identity source to use when mapping a specified attribute to AWS SSO.

" + "documentation":"

The identity source to use when mapping a specified attribute to Amazon Web Services SSO.

" } }, "documentation":"

The value used for mapping a specified attribute to an identity source.

" @@ -629,11 +629,11 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The identifier of the AWS account.

" + "documentation":"

The identifier of the Amazon Web Services account.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", - "documentation":"

The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PrincipalType":{ "shape":"PrincipalType", @@ -641,10 +641,10 @@ }, "PrincipalId":{ "shape":"PrincipalId", - "documentation":"

An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in AWS SSO, see the AWS SSO Identity Store API Reference.

" + "documentation":"

An identifier for an object in Amazon Web Services SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in Amazon Web Services SSO, see the Amazon Web Services SSO Identity Store API Reference.

" } }, - "documentation":"

The assignment that indicates a principal's limited access to a specified AWS account with a specified permission set.

The term principal here refers to a user or group that is defined in AWS SSO.

" + "documentation":"

The assignment that indicates a principal's limited access to a specified Amazon Web Services account with a specified permission set.

The term principal here refers to a user or group that is defined in Amazon Web Services SSO.

" }, "AccountAssignmentList":{ "type":"list", @@ -667,7 +667,7 @@ }, "TargetId":{ "shape":"TargetId", - "documentation":"

TargetID is an AWS account identifier, typically a 10-12 digit string (For example, 123456789012).

" + "documentation":"

TargetID is an Amazon Web Services account identifier, typically a 10-12 digit string (For example, 123456789012).

" }, "TargetType":{ "shape":"TargetType", @@ -675,7 +675,7 @@ }, "PermissionSetArn":{ "shape":"PermissionSetArn", - "documentation":"

The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PrincipalType":{ "shape":"PrincipalType", @@ -683,7 +683,7 @@ }, "PrincipalId":{ "shape":"PrincipalId", - "documentation":"

An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in AWS SSO, see the AWS SSO Identity Store API Reference.

" + "documentation":"

An identifier for an object in Amazon Web Services SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in Amazon Web Services SSO, see the Amazon Web Services SSO Identity Store API Reference.

" }, "CreatedDate":{ "shape":"Date", @@ -732,7 +732,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -758,7 +758,7 @@ }, "Arn":{ "shape":"ManagedPolicyArn", - "documentation":"

The ARN of the IAM managed policy. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the IAM managed policy. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" } }, "documentation":"

A structure that stores the details of the IAM managed policy.

" @@ -789,11 +789,11 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "TargetId":{ "shape":"TargetId", - "documentation":"

TargetID is an AWS account identifier, typically a 10-12 digit string (For example, 123456789012).

" + "documentation":"

TargetID is an Amazon Web Services account identifier, typically a 10-12 digit string (For example, 123456789012).

" }, "TargetType":{ "shape":"TargetType", @@ -809,7 +809,7 @@ }, "PrincipalId":{ "shape":"PrincipalId", - "documentation":"

An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in AWS SSO, see the AWS SSO Identity Store API Reference.

" + "documentation":"

An identifier for an object in Amazon Web Services SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in Amazon Web Services SSO, see the Amazon Web Services SSO Identity Store API Reference.

" } } }, @@ -835,7 +835,7 @@ }, "InstanceAccessControlAttributeConfiguration":{ "shape":"InstanceAccessControlAttributeConfiguration", - "documentation":"

Specifies the AWS SSO identity store attributes to add to your ABAC configuration. When using an external identity provider as an identity source, you can pass attributes through the SAML assertion as an alternative to configuring attributes from the AWS SSO identity store. If a SAML assertion passes any of these attributes, AWS SSO will replace the attribute value with the value from the AWS SSO identity store.

" + "documentation":"

Specifies the Amazon Web Services SSO identity store attributes to add to your ABAC configuration. When using an external identity provider as an identity source, you can pass attributes through the SAML assertion. Doing so provides an alternative to configuring attributes from the Amazon Web Services SSO identity store. If a SAML assertion passes any of these attributes, Amazon Web Services SSO will replace the attribute value with the value from the Amazon Web Services SSO identity store.

" } } }, @@ -861,7 +861,7 @@ }, "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "SessionDuration":{ "shape":"Duration", @@ -882,7 +882,7 @@ "members":{ "PermissionSet":{ "shape":"PermissionSet", - "documentation":"

Defines the level of access on an AWS account.

" + "documentation":"

Defines the level of access on an Amazon Web Services account.

" } } }, @@ -900,11 +900,11 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "TargetId":{ "shape":"TargetId", - "documentation":"

TargetID is an AWS account identifier, typically a 10-12 digit string (For example, 123456789012).

" + "documentation":"

TargetID is an Amazon Web Services account identifier, typically a 10-12 digit string (For example, 123456789012).

" }, "TargetType":{ "shape":"TargetType", @@ -920,7 +920,7 @@ }, "PrincipalId":{ "shape":"PrincipalId", - "documentation":"

An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in AWS SSO, see the AWS SSO Identity Store API Reference.

" + "documentation":"

An identifier for an object in Amazon Web Services SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in Amazon Web Services SSO, see the Amazon Web Services SSO Identity Store API Reference.

" } } }, @@ -942,7 +942,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -979,7 +979,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -1001,7 +1001,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "AccountAssignmentCreationRequestId":{ "shape":"UUId", @@ -1027,7 +1027,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "AccountAssignmentDeletionRequestId":{ "shape":"UUId", @@ -1067,7 +1067,7 @@ }, "InstanceAccessControlAttributeConfiguration":{ "shape":"InstanceAccessControlAttributeConfiguration", - "documentation":"

Gets the list of AWS SSO identity store attributes added to your ABAC configuration.

" + "documentation":"

Gets the list of Amazon Web Services SSO identity store attributes that have been added to your ABAC configuration.

" } } }, @@ -1080,7 +1080,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "ProvisionPermissionSetRequestId":{ "shape":"UUId", @@ -1106,7 +1106,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -1119,7 +1119,7 @@ "members":{ "PermissionSet":{ "shape":"PermissionSet", - "documentation":"

Describes the level of access on an AWS account.

" + "documentation":"

Describes the level of access on an Amazon Web Services account.

" } } }, @@ -1133,7 +1133,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -1171,7 +1171,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -1200,7 +1200,7 @@ "members":{ "AccessControlAttributes":{ "shape":"AccessControlAttributeList", - "documentation":"

Lists the attributes that are configured for ABAC in the specified AWS SSO instance.

" + "documentation":"

Lists the attributes that are configured for ABAC in the specified Amazon Web Services SSO instance.

" } }, "documentation":"

Specifies the attributes to add to your attribute-based access control (ABAC) configuration.

" @@ -1229,7 +1229,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "IdentityStoreId":{ "shape":"Id", @@ -1253,7 +1253,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "MaxResults":{ "shape":"MaxResults", @@ -1288,7 +1288,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "MaxResults":{ "shape":"MaxResults", @@ -1327,11 +1327,11 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "AccountId":{ "shape":"TargetId", - "documentation":"

The identifier of the AWS account from which to list the assignments.

" + "documentation":"

The identifier of the Amazon Web Services account from which to list the assignments.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -1352,7 +1352,7 @@ "members":{ "AccountAssignments":{ "shape":"AccountAssignmentList", - "documentation":"

The list of assignments that match the input AWS account and permission set.

" + "documentation":"

The list of assignments that match the input Amazon Web Services account and permission set.

" }, "NextToken":{ "shape":"Token", @@ -1369,15 +1369,15 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", - "documentation":"

The ARN of the PermissionSet from which the associated AWS accounts will be listed.

" + "documentation":"

The ARN of the PermissionSet from which the associated Amazon Web Services accounts will be listed.

" }, "ProvisioningStatus":{ "shape":"ProvisioningStatus", - "documentation":"

The permission set provisioning status for an AWS account.

" + "documentation":"

The permission set provisioning status for an Amazon Web Services account.

" }, "MaxResults":{ "shape":"MaxResults", @@ -1394,7 +1394,7 @@ "members":{ "AccountIds":{ "shape":"AccountList", - "documentation":"

The list of AWS AccountIds.

" + "documentation":"

The list of Amazon Web Services AccountIds.

" }, "NextToken":{ "shape":"Token", @@ -1437,7 +1437,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -1472,7 +1472,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "MaxResults":{ "shape":"MaxResults", @@ -1510,11 +1510,11 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "AccountId":{ "shape":"AccountId", - "documentation":"

The identifier of the AWS account from which to list the assignments.

" + "documentation":"

The identifier of the Amazon Web Services account from which to list the assignments.

" }, "ProvisioningStatus":{ "shape":"ProvisioningStatus", @@ -1539,7 +1539,7 @@ }, "PermissionSets":{ "shape":"PermissionSetList", - "documentation":"

Defines the level of access that an AWS account has.

" + "documentation":"

Defines the level of access that an Amazon Web Services account has.

" } } }, @@ -1549,7 +1549,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "NextToken":{ "shape":"Token", @@ -1566,7 +1566,7 @@ "members":{ "PermissionSets":{ "shape":"PermissionSetList", - "documentation":"

Defines the level of access on an AWS account.

" + "documentation":"

Defines the level of access on an Amazon Web Services account.

" }, "NextToken":{ "shape":"Token", @@ -1583,7 +1583,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceArn":{ "shape":"GeneralArn", @@ -1642,7 +1642,7 @@ }, "PermissionSetArn":{ "shape":"PermissionSetArn", - "documentation":"

The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "Description":{ "shape":"PermissionSetDescription", @@ -1705,11 +1705,11 @@ }, "AccountId":{ "shape":"AccountId", - "documentation":"

The identifier of the AWS account from which to list the assignments.

" + "documentation":"

The identifier of the Amazon Web Services account from which to list the assignments.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", - "documentation":"

The ARN of the permission set that is being provisioned. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the permission set that is being provisioned. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "FailureReason":{ "shape":"Reason", @@ -1767,7 +1767,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -1775,7 +1775,7 @@ }, "TargetId":{ "shape":"TargetId", - "documentation":"

TargetID is an AWS account identifier, typically a 10-12 digit string (For example, 123456789012).

" + "documentation":"

TargetID is an Amazon Web Services account identifier, typically a 10-12 digit string (For example, 123456789012).

" }, "TargetType":{ "shape":"ProvisionTargetType", @@ -1816,7 +1816,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -1881,7 +1881,7 @@ "documentation":"

The value of the tag.

" } }, - "documentation":"

A set of key-value pairs that are used to manage the resource. Tags can only be applied to permission sets and cannot be applied to corresponding roles that AWS SSO creates in AWS accounts.

" + "documentation":"

A set of key-value pairs that are used to manage the resource. Tags can only be applied to permission sets and cannot be applied to corresponding roles that Amazon Web Services SSO creates in Amazon Web Services accounts.

" }, "TagKey":{ "type":"string", @@ -1911,7 +1911,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceArn":{ "shape":"GeneralArn", @@ -1970,7 +1970,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceArn":{ "shape":"GeneralArn", @@ -2018,7 +2018,7 @@ "members":{ "InstanceArn":{ "shape":"InstanceArn", - "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

" }, "PermissionSetArn":{ "shape":"PermissionSetArn", @@ -2052,5 +2052,6 @@ "exception":true }, "ValidationExceptionMessage":{"type":"string"} - } + }, + "documentation":"

Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO access to multiple Amazon Web Services accounts and business applications. This guide provides information on SSO operations which could be used for access management of Amazon Web Services accounts. For information about Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.

Many operations in the SSO APIs rely on identifiers for users and groups, known as principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO, see the Amazon Web Services SSO Identity Store API Reference.

" } diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 4e1240ea4fb6..e45fb25c90fd 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index 452cee8d0e02..e0a50042a174 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/sts/pom.xml b/services/sts/pom.xml index 3285613e24f6..1f35728ec18a 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/sts/src/it/java/software/amazon/awssdk/services/sts/AssumeRoleIntegrationTest.java b/services/sts/src/it/java/software/amazon/awssdk/services/sts/AssumeRoleIntegrationTest.java index c47772c58b2f..c845bb770cf8 100644 --- a/services/sts/src/it/java/software/amazon/awssdk/services/sts/AssumeRoleIntegrationTest.java +++ b/services/sts/src/it/java/software/amazon/awssdk/services/sts/AssumeRoleIntegrationTest.java @@ -168,7 +168,7 @@ public void profileCredentialsProviderCanAssumeRoles() throws InterruptedExcepti .build(); Optional profile = profiles.profile("test"); AwsCredentialsProvider awsCredentialsProvider = - new ProfileCredentialsUtils(profile.get(), profiles::profile).credentialsProvider().get(); + new ProfileCredentialsUtils(profiles, profile.get(), profiles::profile).credentialsProvider().get(); // Try to assume the role until the eventual consistency catches up. 
@@ -200,7 +200,7 @@ public void profileCredentialProviderCanAssumeRolesWithEnvironmentCredentialSour .build(); Optional profile = profiles.profile("test"); AwsCredentialsProvider awsCredentialsProvider = - new ProfileCredentialsUtils(profile.get(), profiles::profile).credentialsProvider().get(); + new ProfileCredentialsUtils(profiles, profile.get(), profiles::profile).credentialsProvider().get(); // Try to assume the role until the eventual consistency catches up. @@ -237,7 +237,7 @@ public void profileCredentialProviderWithEnvironmentCredentialSourceAndSystemPro .build(); Optional profile = profiles.profile("test"); AwsCredentialsProvider awsCredentialsProvider = - new ProfileCredentialsUtils(profile.get(), profiles::profile).credentialsProvider().get(); + new ProfileCredentialsUtils(profiles, profile.get(), profiles::profile).credentialsProvider().get(); // Try to assume the role until the eventual consistency catches up. diff --git a/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/AssumeRoleProfileTest.java b/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/AssumeRoleProfileTest.java index fd2fa6ebb776..c78978163be8 100644 --- a/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/AssumeRoleProfileTest.java +++ b/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/AssumeRoleProfileTest.java @@ -45,7 +45,7 @@ public void createAssumeRoleCredentialsProviderViaProfileSucceeds() { .type(ProfileFile.Type.CONFIGURATION) .build(); assertThat(profiles.profile("test")).hasValueSatisfying(profile -> { - assertThat(new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { + assertThat(new ProfileCredentialsUtils(profiles, profile, profiles::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { assertThat(credentialsProvider).isInstanceOf(SdkAutoCloseable.class); ((SdkAutoCloseable) 
credentialsProvider).close(); }); diff --git a/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/WebIdentityTokenCredentialProviderTest.java b/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/WebIdentityTokenCredentialProviderTest.java index 564d1dafae5e..b3b0a1541674 100644 --- a/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/WebIdentityTokenCredentialProviderTest.java +++ b/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/WebIdentityTokenCredentialProviderTest.java @@ -37,7 +37,7 @@ public void createAssumeRoleWithWebIdentityTokenCredentialsProviderViaProfileSuc .type(ProfileFile.Type.CONFIGURATION) .build(); assertThat(profiles.profile("test")).hasValueSatisfying(profile -> { - assertThat(new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { + assertThat(new ProfileCredentialsUtils(profiles, profile, profiles::profile).credentialsProvider()).hasValueSatisfying(credentialsProvider -> { assertThat(credentialsProvider).isInstanceOf(SdkAutoCloseable.class); assertThat(credentialsProvider).hasFieldOrProperty("stsClient"); ((SdkAutoCloseable) credentialsProvider).close(); diff --git a/services/support/pom.xml b/services/support/pom.xml index 100dc5ee9cc8..133f99a3614a 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/swf/pom.xml b/services/swf/pom.xml index 1afa10bce313..520c5a17c947 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index 975a59c5cf67..21516dce8b25 100644 --- a/services/synthetics/pom.xml +++ 
b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/synthetics/src/main/resources/codegen-resources/service-2.json b/services/synthetics/src/main/resources/codegen-resources/service-2.json index 255bc9bba01d..71603c73f6ba 100644 --- a/services/synthetics/src/main/resources/codegen-resources/service-2.json +++ b/services/synthetics/src/main/resources/codegen-resources/service-2.json @@ -174,7 +174,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Assigns one or more tags (key-value pairs) to the specified canary.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with a canary that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a canary.

" + "documentation":"

Assigns one or more tags (key-value pairs) to the specified canary.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

You can use the TagResource action with a canary that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a canary.

" }, "UntagResource":{ "name":"UntagResource", @@ -209,6 +209,35 @@ } }, "shapes":{ + "BaseScreenshot":{ + "type":"structure", + "required":["ScreenshotName"], + "members":{ + "ScreenshotName":{ + "shape":"String", + "documentation":"

The name of the screenshot. This is generated the first time the canary is run after the UpdateCanary operation that specified for this canary to perform visual monitoring.

" + }, + "IgnoreCoordinates":{ + "shape":"BaseScreenshotIgnoreCoordinates", + "documentation":"

Coordinates that define the part of a screen to ignore during screenshot comparisons. To obtain the coordinates to use here, use the CloudWatch Logs console to draw the boundaries on the screen. For more information, see {LINK}

" + } + }, + "documentation":"

A structure representing a screenshot that is used as a baseline during visual monitoring comparisons made by the canary.

" + }, + "BaseScreenshotConfigIgnoreCoordinate":{ + "type":"string", + "pattern":"^(-?\\d{1,5}\\.?\\d{0,2},){3}(-?\\d{1,5}\\.?\\d{0,2}){1}$" + }, + "BaseScreenshotIgnoreCoordinates":{ + "type":"list", + "member":{"shape":"BaseScreenshotConfigIgnoreCoordinate"}, + "max":20, + "min":0 + }, + "BaseScreenshots":{ + "type":"list", + "member":{"shape":"BaseScreenshot"} + }, "Blob":{ "type":"blob", "max":10000000, @@ -272,6 +301,10 @@ "documentation":"

Specifies the runtime version to use for the canary. For more information about runtime versions, see Canary Runtime Versions.

" }, "VpcConfig":{"shape":"VpcConfigOutput"}, + "VisualReference":{ + "shape":"VisualReferenceOutput", + "documentation":"

If this canary performs visual monitoring by comparing screenshots, this structure contains the ID of the canary run to use as the baseline for screenshots, and the coordinates of any parts of the screen to ignore during the visual monitoring comparison.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

The list of key-value pairs that are associated with the canary.

" @@ -291,7 +324,7 @@ "members":{ "S3Bucket":{ "shape":"String", - "documentation":"

If your canary script is located in S3, specify the full bucket name here. The bucket must already exist. Specify the full bucket name, including s3:// as the start of the bucket name.

" + "documentation":"

If your canary script is located in S3, specify the bucket name here. Do not include s3:// as the start of the bucket name.

" }, "S3Key":{ "shape":"String", @@ -303,11 +336,11 @@ }, "ZipFile":{ "shape":"Blob", - "documentation":"

If you input your canary script directly into the canary instead of referring to an S3 location, the value of this parameter is the .zip file that contains the script. It can be up to 5 MB.

" + "documentation":"

If you input your canary script directly into the canary instead of referring to an S3 location, the value of this parameter is the base64-encoded contents of the .zip file that contains the script. It must be smaller than 256 Kb.

" }, "Handler":{ "shape":"String", - "documentation":"

The entry point to use for the source code when running the canary. This value must end with the string .handler.

" + "documentation":"

The entry point to use for the source code when running the canary. This value must end with the string .handler. The string is limited to 29 characters or fewer.

" } }, "documentation":"

Use this structure to input your script code for the canary. This structure contains the Lambda handler with the location where the canary should start running the script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included. If the script was passed into the canary directly, the script code is contained in the value of Zipfile.

" @@ -385,7 +418,7 @@ }, "ActiveTracing":{ "shape":"NullableBoolean", - "documentation":"

Specifies whether this canary is to use active AWS X-Ray tracing when it runs. Active tracing enables this canary run to be displayed in the ServiceLens and X-Ray service maps even if the canary does not hit an endpoint that has X-ray tracing enabled. Using X-Ray tracing incurs charges. For more information, see Canaries and X-Ray tracing.

You can enable active tracing only for canaries that use version syn-nodejs-2.0 or later for their canary runtime.

" + "documentation":"

Specifies whether this canary is to use active X-Ray tracing when it runs. Active tracing enables this canary run to be displayed in the ServiceLens and X-Ray service maps even if the canary does not hit an endpoint that has X-Ray tracing enabled. Using X-Ray tracing incurs charges. For more information, see Canaries and X-Ray tracing.

You can enable active tracing only for canaries that use version syn-nodejs-2.0 or later for their canary runtime.

" }, "EnvironmentVariables":{ "shape":"EnvironmentVariablesMap", @@ -407,7 +440,7 @@ }, "ActiveTracing":{ "shape":"NullableBoolean", - "documentation":"

Displays whether this canary run used active AWS X-Ray tracing.

" + "documentation":"

Displays whether this canary run used active X-Ray tracing.

" } }, "documentation":"

A structure that contains information about a canary run.

" @@ -469,7 +502,7 @@ "members":{ "Expression":{ "shape":"String", - "documentation":"

A rate expression that defines how often the canary is to run. The syntax is rate(number unit). unit can be minute, minutes, or hour.

For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour. You can specify a frequency between rate(1 minute) and rate(1 hour).

Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

" + "documentation":"

A rate expression or a cron expression that defines how often the canary is to run.

For a rate expression, the syntax is rate(number unit). unit can be minute, minutes, or hour.

For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour. You can specify a frequency between rate(1 minute) and rate(1 hour).

Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

Use cron(expression) to specify a cron expression. You can't schedule a canary to wait for more than a year before running. For information about the syntax for cron expressions, see Scheduling canary runs using cron.

" }, "DurationInSeconds":{ "shape":"MaxOneYearInSeconds", @@ -483,7 +516,7 @@ "members":{ "Expression":{ "shape":"String", - "documentation":"

A rate expression that defines how often the canary is to run. The syntax is rate(number unit). unit can be minute, minutes, or hour.

For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour.

Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

" + "documentation":"

A rate expression or a cron expression that defines how often the canary is to run.

For a rate expression, the syntax is rate(number unit). unit can be minute, minutes, or hour.

For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour. You can specify a frequency between rate(1 minute) and rate(1 hour).

Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

Use cron(expression) to specify a cron expression. For information about the syntax for cron expressions, see Scheduling canary runs using cron.

" }, "DurationInSeconds":{ "shape":"MaxOneYearInSeconds", @@ -580,7 +613,7 @@ }, "ArtifactS3Location":{ "shape":"String", - "documentation":"

The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files.

" + "documentation":"

The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the S3 bucket can't include a period (.).

" }, "ExecutionRoleArn":{ "shape":"RoleArn", @@ -1070,6 +1103,10 @@ "VpcConfig":{ "shape":"VpcConfigInput", "documentation":"

If this canary is to test an endpoint in a VPC, this structure contains information about the subnet and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

" + }, + "VisualReference":{ + "shape":"VisualReferenceInput", + "documentation":"

Defines the screenshots to use as the baseline for comparisons during visual monitoring comparisons during future runs of this canary. If you omit this parameter, no changes are made to any baseline screenshots that the canary might be using already.

Visual monitoring is supported only on canaries running the syn-puppeteer-node-3.2 runtime or later. For more information, see Visual monitoring and Visual monitoring blueprint

" } } }, @@ -1087,6 +1124,35 @@ "error":{"httpStatusCode":400}, "exception":true }, + "VisualReferenceInput":{ + "type":"structure", + "required":["BaseCanaryRunId"], + "members":{ + "BaseScreenshots":{ + "shape":"BaseScreenshots", + "documentation":"

An array of screenshots that will be used as the baseline for visual monitoring in future runs of this canary. If there is a screenshot that you don't want to be used for visual monitoring, remove it from this array.

" + }, + "BaseCanaryRunId":{ + "shape":"String", + "documentation":"

Specifies which canary run to use the screenshots from as the baseline for future visual monitoring with this canary. Valid values are nextrun to use the screenshots from the next run after this update is made, lastrun to use the screenshots from the most recent run before this update was made, or the value of Id in the CanaryRun from any past run of this canary.

" + } + }, + "documentation":"

An object that specifies what screenshots to use as a baseline for visual monitoring by this canary, and optionally the parts of the screenshots to ignore during the visual monitoring comparison.

Visual monitoring is supported only on canaries running the syn-puppeteer-node-3.2 runtime or later. For more information, see Visual monitoring and Visual monitoring blueprint

" + }, + "VisualReferenceOutput":{ + "type":"structure", + "members":{ + "BaseScreenshots":{ + "shape":"BaseScreenshots", + "documentation":"

An array of screenshots that are used as the baseline for comparisons during visual monitoring.

" + }, + "BaseCanaryRunId":{ + "shape":"String", + "documentation":"

The ID of the canary run that produced the screenshots that are used as the baseline for visual monitoring comparisons during future runs of this canary.

" + } + }, + "documentation":"

If this canary performs visual monitoring by comparing screenshots, this structure contains the ID of the canary run that is used as the baseline for screenshots, and the coordinates of any parts of those screenshots that are ignored during visual monitoring comparison.

Visual monitoring is supported only on canaries running the syn-puppeteer-node-3.2 runtime or later.

" + }, "VpcConfigInput":{ "type":"structure", "members":{ diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 5bcacdec7dc2..475fb16961e5 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/textract/src/main/resources/codegen-resources/service-2.json b/services/textract/src/main/resources/codegen-resources/service-2.json index 9b070d759948..2d52315cb159 100644 --- a/services/textract/src/main/resources/codegen-resources/service-2.json +++ b/services/textract/src/main/resources/codegen-resources/service-2.json @@ -34,6 +34,27 @@ ], "documentation":"

Analyzes an input document for relationships between detected items.

The types of information returned are as follows:

  • Form data (key-value pairs). The related information is returned in two Block objects, each of type KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva Carolina is the value.

  • Table and table cell data. A TABLE Block object contains information about a detected table. A CELL Block object is returned for each cell in a table.

  • Lines and words of text. A LINE Block object contains one or more WORD Block objects. All lines and words that are detected in the document are returned (including text that doesn't have a relationship with the value of FeatureTypes).

Selection elements such as check boxes and option buttons (radio buttons) can be detected in form data and in tables. A SELECTION_ELEMENT Block object contains information about a selection element, including the selection status.

You can choose which type of analysis to perform by specifying the FeatureTypes list.

The output is returned in a list of Block objects.

AnalyzeDocument is a synchronous operation. To analyze documents asynchronously, use StartDocumentAnalysis.

For more information, see Document Text Analysis.

" }, + "AnalyzeExpense":{ + "name":"AnalyzeExpense", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AnalyzeExpenseRequest"}, + "output":{"shape":"AnalyzeExpenseResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidS3ObjectException"}, + {"shape":"UnsupportedDocumentException"}, + {"shape":"DocumentTooLargeException"}, + {"shape":"BadDocumentException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Analyzes an input document for financially related relationships between text.

Information is returned as ExpenseDocuments and separated as follows.

  • LineItemGroups- A data set containing LineItems which store information about the lines of text, such as an item purchased and its price on a receipt.

  • SummaryFields- Contains all other information on a receipt, such as header information or the vendor's name.

" + }, "DetectDocumentText":{ "name":"DetectDocumentText", "http":{ @@ -70,7 +91,8 @@ {"shape":"InvalidJobIdException"}, {"shape":"InternalServerError"}, {"shape":"ThrottlingException"}, - {"shape":"InvalidS3ObjectException"} + {"shape":"InvalidS3ObjectException"}, + {"shape":"InvalidKMSKeyException"} ], "documentation":"

Gets the results for an Amazon Textract asynchronous operation that analyzes text in a document.

You start asynchronous text analysis by calling StartDocumentAnalysis, which returns a job identifier (JobId). When the text analysis operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartDocumentAnalysis. To get the results of the text-detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis.

GetDocumentAnalysis returns an array of Block objects. The following types of information are returned:

  • Form data (key-value pairs). The related information is returned in two Block objects, each of type KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva Carolina is the value.

  • Table and table cell data. A TABLE Block object contains information about a detected table. A CELL Block object is returned for each cell in a table.

  • Lines and words of text. A LINE Block object contains one or more WORD Block objects. All lines and words that are detected in the document are returned (including text that doesn't have a relationship with the value of the StartDocumentAnalysis FeatureTypes input parameter).

Selection elements such as check boxes and option buttons (radio buttons) can be detected in form data and in tables. A SELECTION_ELEMENT Block object contains information about a selection element, including the selection status.

Use the MaxResults parameter to limit the number of blocks that are returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetDocumentAnalysis, and populate the NextToken request parameter with the token value that's returned from the previous call to GetDocumentAnalysis.

For more information, see Document Text Analysis.

" }, @@ -89,7 +111,8 @@ {"shape":"InvalidJobIdException"}, {"shape":"InternalServerError"}, {"shape":"ThrottlingException"}, - {"shape":"InvalidS3ObjectException"} + {"shape":"InvalidS3ObjectException"}, + {"shape":"InvalidKMSKeyException"} ], "documentation":"

Gets the results for an Amazon Textract asynchronous operation that detects text in a document. Amazon Textract can detect lines of text and the words that make up a line of text.

You start asynchronous text detection by calling StartDocumentTextDetection, which returns a job identifier (JobId). When the text detection operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartDocumentTextDetection. To get the results of the text-detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentTextDetection, and pass the job identifier (JobId) from the initial call to StartDocumentTextDetection.

GetDocumentTextDetection returns an array of Block objects.

Each document page has as an associated Block of type PAGE. Each PAGE Block object is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is a parent for each word that makes up the line. Words are represented by Block objects of type WORD.

Use the MaxResults parameter to limit the number of blocks that are returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetDocumentTextDetection, and populate the NextToken request parameter with the token value that's returned from the previous call to GetDocumentTextDetection.

For more information, see Document Text Detection.

" }, @@ -192,6 +215,23 @@ } } }, + "AnalyzeExpenseRequest":{ + "type":"structure", + "required":["Document"], + "members":{ + "Document":{"shape":"Document"} + } + }, + "AnalyzeExpenseResponse":{ + "type":"structure", + "members":{ + "DocumentMetadata":{"shape":"DocumentMetadata"}, + "ExpenseDocuments":{ + "shape":"ExpenseDocumentList", + "documentation":"

The expenses detected by Amazon Textract.

" + } + } + }, "BadDocumentException":{ "type":"structure", "members":{ @@ -397,6 +437,83 @@ "member":{"shape":"EntityType"} }, "ErrorCode":{"type":"string"}, + "ExpenseDetection":{ + "type":"structure", + "members":{ + "Text":{ + "shape":"String", + "documentation":"

The word or line of text recognized by Amazon Textract.

" + }, + "Geometry":{"shape":"Geometry"}, + "Confidence":{ + "shape":"Percent", + "documentation":"

The confidence in detection, as a percentage

" + } + }, + "documentation":"

An object used to store information about the Value or Label detected by Amazon Textract.

" + }, + "ExpenseDocument":{ + "type":"structure", + "members":{ + "ExpenseIndex":{ + "shape":"UInteger", + "documentation":"

Denotes which invoice or receipt in the document the information is coming from. First document will be 1, the second 2, and so on.

" + }, + "SummaryFields":{ + "shape":"ExpenseFieldList", + "documentation":"

Any information found outside of a table by Amazon Textract.

" + }, + "LineItemGroups":{ + "shape":"LineItemGroupList", + "documentation":"

Information detected on each table of a document, separated into LineItems.

" + } + }, + "documentation":"

The structure holding all the information returned by AnalyzeExpense

" + }, + "ExpenseDocumentList":{ + "type":"list", + "member":{"shape":"ExpenseDocument"} + }, + "ExpenseField":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"ExpenseType", + "documentation":"

The implied label of a detected element. Present alongside LabelDetection for explicit elements.

" + }, + "LabelDetection":{ + "shape":"ExpenseDetection", + "documentation":"

The explicitly stated label of a detected element.

" + }, + "ValueDetection":{ + "shape":"ExpenseDetection", + "documentation":"

The value of a detected element. Present in explicit and implicit elements.

" + }, + "PageNumber":{ + "shape":"UInteger", + "documentation":"

The page number the value was detected on.

" + } + }, + "documentation":"

Breakdown of detected information, separated into the categories Type, LabelDetection, and ValueDetection.

" + }, + "ExpenseFieldList":{ + "type":"list", + "member":{"shape":"ExpenseField"} + }, + "ExpenseType":{ + "type":"structure", + "members":{ + "Text":{ + "shape":"String", + "documentation":"

The word or line of text detected by Amazon Textract.

" + }, + "Confidence":{ + "shape":"Percent", + "documentation":"

The confidence of accuracy, as a percentage.

" + } + }, + "documentation":"

An object used to store information about the Type detected by Amazon Textract.

" + }, "FeatureType":{ "type":"string", "enum":[ @@ -705,6 +822,38 @@ "documentation":"

An Amazon Textract service limit was exceeded. For example, if you start too many asynchronous jobs concurrently, calls to start operations (StartDocumentTextDetection, for example) raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Textract service limit.

", "exception":true }, + "LineItemFields":{ + "type":"structure", + "members":{ + "LineItemExpenseFields":{ + "shape":"ExpenseFieldList", + "documentation":"

ExpenseFields used to show information from detected lines on a table.

" + } + }, + "documentation":"

A structure that holds information about the different lines found in a document's tables.

" + }, + "LineItemGroup":{ + "type":"structure", + "members":{ + "LineItemGroupIndex":{ + "shape":"UInteger", + "documentation":"

The number used to identify a specific table in a document. The first table encountered will have a LineItemGroupIndex of 1, the second 2, etc.

" + }, + "LineItems":{ + "shape":"LineItemList", + "documentation":"

The breakdown of information on a particular line of a table.

" + } + }, + "documentation":"

A grouping of tables which contain LineItems, with each table identified by the table's LineItemGroupIndex.

" + }, + "LineItemGroupList":{ + "type":"list", + "member":{"shape":"LineItemGroup"} + }, + "LineItemList":{ + "type":"list", + "member":{"shape":"LineItemFields"} + }, "MaxResults":{ "type":"integer", "min":1 @@ -744,7 +893,7 @@ "documentation":"

The prefix of the object key that the output will be saved to. When not enabled, the prefix will be “textract_output\".

" } }, - "documentation":"

Sets whether or not your output will go to a user created bucket. Used to set the name of the bucket, and the prefix on the output file.

" + "documentation":"

Sets whether or not your output will go to a user created bucket. Used to set the name of the bucket, and the prefix on the output file.

OutputConfig is an optional parameter which lets you adjust where your output will be placed. By default, Amazon Textract will store the results internally and can only be accessed by the Get API operations. With OutputConfig enabled, you can set the name of the bucket the output will be sent to and the file prefix of the results where you can download your results. Additionally, you can set the KMSKeyID parameter to a customer master key (CMK) to encrypt your output. Without this parameter set Amazon Textract will encrypt server-side using the AWS managed CMK for Amazon S3.

Decryption of Customer Content is necessary for processing of the documents by Amazon Textract. If your account is opted out under an AI services opt out policy then all unencrypted Customer Content is immediately and permanently deleted after the Customer Content has been processed by the service. No copy of the output is retained by Amazon Textract. For information about how to opt out, see Managing AI services opt-out policy.

For more information on data privacy, see the Data Privacy FAQ.

" }, "Pages":{ "type":"list", @@ -829,7 +978,7 @@ "members":{ "Bucket":{ "shape":"S3Bucket", - "documentation":"

The name of the S3 bucket.

" + "documentation":"

The name of the S3 bucket. Note that the # character is not valid in the file name.

" }, "Name":{ "shape":"S3ObjectName", diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index bfa6865298ca..9220a97990a7 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 8a627efb5515..66142223e94d 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 0a36773318c1..162f3a92933e 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribe/src/main/resources/codegen-resources/paginators-1.json b/services/transcribe/src/main/resources/codegen-resources/paginators-1.json index 6ec63652b3be..b3c227203657 100644 --- a/services/transcribe/src/main/resources/codegen-resources/paginators-1.json +++ b/services/transcribe/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,15 @@ { "pagination": { + "ListCallAnalyticsCategories": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListCallAnalyticsJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListLanguageModels": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/transcribe/src/main/resources/codegen-resources/service-2.json b/services/transcribe/src/main/resources/codegen-resources/service-2.json index c16b1bdc58b1..152342c81219 100644 --- 
a/services/transcribe/src/main/resources/codegen-resources/service-2.json +++ b/services/transcribe/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,22 @@ "uid":"transcribe-2017-10-26" }, "operations":{ + "CreateCallAnalyticsCategory":{ + "name":"CreateCallAnalyticsCategory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCallAnalyticsCategoryRequest"}, + "output":{"shape":"CreateCallAnalyticsCategoryResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates an analytics category. Amazon Transcribe applies the conditions specified by your analytics categories to your call analytics jobs. For each analytics category, you specify one or more rules. For example, you can specify a rule that the customer sentiment was neutral or negative within that category. If you start a call analytics job, Amazon Transcribe applies the category to the analytics job that you've specified.

" + }, "CreateLanguageModel":{ "name":"CreateLanguageModel", "http":{ @@ -77,6 +93,37 @@ ], "documentation":"

Creates a new vocabulary filter that you can use to filter words, such as profane words, from the output of a transcription job.

" }, + "DeleteCallAnalyticsCategory":{ + "name":"DeleteCallAnalyticsCategory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCallAnalyticsCategoryRequest"}, + "output":{"shape":"DeleteCallAnalyticsCategoryResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Deletes a call analytics category using its name.

" + }, + "DeleteCallAnalyticsJob":{ + "name":"DeleteCallAnalyticsJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCallAnalyticsJobRequest"}, + "output":{"shape":"DeleteCallAnalyticsJobResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Deletes a call analytics job using its name.

" + }, "DeleteLanguageModel":{ "name":"DeleteLanguageModel", "http":{ @@ -178,7 +225,39 @@ {"shape":"InternalFailureException"}, {"shape":"NotFoundException"} ], - "documentation":"

Gets information about a single custom language model. Use this information to see details about the language model in your AWS account. You can also see whether the base language model used to create your custom language model has been updated. If Amazon Transcribe has updated the base model, you can create a new custom language model using the updated base model. If the language model wasn't created, you can use this operation to understand why Amazon Transcribe couldn't create it.

" + "documentation":"

Gets information about a single custom language model. Use this information to see details about the language model in your Amazon Web Services account. You can also see whether the base language model used to create your custom language model has been updated. If Amazon Transcribe has updated the base model, you can create a new custom language model using the updated base model. If the language model wasn't created, you can use this operation to understand why Amazon Transcribe couldn't create it.

" + }, + "GetCallAnalyticsCategory":{ + "name":"GetCallAnalyticsCategory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCallAnalyticsCategoryRequest"}, + "output":{"shape":"GetCallAnalyticsCategoryResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieves information about a call analytics category.

" + }, + "GetCallAnalyticsJob":{ + "name":"GetCallAnalyticsJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCallAnalyticsJobRequest"}, + "output":{"shape":"GetCallAnalyticsJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Returns information about a call analytics job. To see the status of the job, check the CallAnalyticsJobStatus field. If the status is COMPLETED, the job is finished and you can find the results at the location specified in the TranscriptFileUri field. If you enable personally identifiable information (PII) redaction, the redacted transcript appears in the RedactedTranscriptFileUri field.

" }, "GetMedicalTranscriptionJob":{ "name":"GetMedicalTranscriptionJob", @@ -260,6 +339,36 @@ ], "documentation":"

Returns information about a vocabulary filter.

" }, + "ListCallAnalyticsCategories":{ + "name":"ListCallAnalyticsCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCallAnalyticsCategoriesRequest"}, + "output":{"shape":"ListCallAnalyticsCategoriesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Provides more information about the call analytics categories that you've created. You can use the information in this list to find a specific category. You can then use the operation to get more information about it.

" + }, + "ListCallAnalyticsJobs":{ + "name":"ListCallAnalyticsJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCallAnalyticsJobsRequest"}, + "output":{"shape":"ListCallAnalyticsJobsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

List call analytics jobs with a specified status or substring that matches their names.

" + }, "ListLanguageModels":{ "name":"ListLanguageModels", "http":{ @@ -350,6 +459,22 @@ ], "documentation":"

Gets information about vocabulary filters.

" }, + "StartCallAnalyticsJob":{ + "name":"StartCallAnalyticsJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartCallAnalyticsJobRequest"}, + "output":{"shape":"StartCallAnalyticsJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Starts an asynchronous analytics job that not only transcribes the audio recording of a caller and agent, but also returns additional insights. These insights include how quickly or loudly the caller or agent was speaking. To retrieve additional insights with your analytics jobs, create categories. A category is a way to classify analytics jobs based on attributes, such as a customer's sentiment or a particular phrase being used during the call. For more information, see the operation.

" + }, "StartMedicalTranscriptionJob":{ "name":"StartMedicalTranscriptionJob", "http":{ @@ -382,6 +507,23 @@ ], "documentation":"

Starts an asynchronous job to transcribe speech to text.

" }, + "UpdateCallAnalyticsCategory":{ + "name":"UpdateCallAnalyticsCategory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateCallAnalyticsCategoryRequest"}, + "output":{"shape":"UpdateCallAnalyticsCategoryResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates the call analytics category with new values. The UpdateCallAnalyticsCategory operation overwrites all of the existing information with the values that you provide in the request.

" + }, "UpdateMedicalVocabulary":{ "name":"UpdateMedicalVocabulary", "http":{ @@ -434,6 +576,28 @@ } }, "shapes":{ + "AbsoluteTimeRange":{ + "type":"structure", + "members":{ + "StartTime":{ + "shape":"TimestampMilliseconds", + "documentation":"

A value that indicates the beginning of the time range in milliseconds. To set absolute time range, you must specify a start time and an end time. For example, if you specify the following values:

  • StartTime - 10000

  • EndTime - 50000

The time range is set between 10,000 milliseconds and 50,000 milliseconds into the call.

" + }, + "EndTime":{ + "shape":"TimestampMilliseconds", + "documentation":"

A value that indicates the end of the time range in milliseconds. To set absolute time range, you must specify a start time and an end time. For example, if you specify the following values:

  • StartTime - 10000

  • EndTime - 50000

The time range is set between 10,000 milliseconds and 50,000 milliseconds into the call.

" + }, + "First":{ + "shape":"TimestampMilliseconds", + "documentation":"

A time range from the beginning of the call to the value that you've specified. For example, if you specify 100000, the time range is set to the first 100,000 milliseconds of the call.

" + }, + "Last":{ + "shape":"TimestampMilliseconds", + "documentation":"

A time range from the value that you've specified to the end of the call. For example, if you specify 100000, the time range is set to the last 100,000 milliseconds of the call.

" + } + }, + "documentation":"

A time range, set in milliseconds, between two points in the call.

" + }, "BadRequestException":{ "type":"structure", "members":{ @@ -460,6 +624,203 @@ "en-AU" ] }, + "CallAnalyticsJob":{ + "type":"structure", + "members":{ + "CallAnalyticsJobName":{ + "shape":"CallAnalyticsJobName", + "documentation":"

The name of the call analytics job.

" + }, + "CallAnalyticsJobStatus":{ + "shape":"CallAnalyticsJobStatus", + "documentation":"

The status of the analytics job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

If you know the language spoken between the customer and the agent, specify a language code for this field.

If you don't know the language, you can leave this field blank, and Amazon Transcribe will use machine learning to automatically identify the language. To improve the accuracy of language identification, you can provide an array containing the possible language codes for the language spoken in your audio.

The following list shows the supported languages and corresponding language codes for call analytics jobs:

  • Gulf Arabic (ar-AE)

  • Mandarin Chinese, Mainland (zh-CN)

  • Australian English (en-AU)

  • British English (en-GB)

  • Indian English (en-IN)

  • Irish English (en-IE)

  • Scottish English (en-AB)

  • US English (en-US)

  • Welsh English (en-WL)

  • Spanish (es-ES)

  • US Spanish (es-US)

  • French (fr-FR)

  • Canadian French (fr-CA)

  • German (de-DE)

  • Swiss German (de-CH)

  • Indian Hindi (hi-IN)

  • Italian (it-IT)

  • Japanese (ja-JP)

  • Korean (ko-KR)

  • Portuguese (pt-PT)

  • Brazilian Portuguese (pt-BR)

" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "documentation":"

The sample rate, in Hertz, of the audio.

" + }, + "MediaFormat":{ + "shape":"MediaFormat", + "documentation":"

The format of the input audio file. Note: for call analytics jobs, only the following media formats are supported: MP3, MP4, WAV, FLAC, OGG, and WebM.

" + }, + "Media":{"shape":"Media"}, + "Transcript":{"shape":"Transcript"}, + "StartTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the analytics job started processing.

" + }, + "CreationTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the analytics job was created.

" + }, + "CompletionTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the analytics job was completed.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the CallAnalyticsJobStatus is FAILED, this field contains information about why the job failed.

The FailureReason field can contain one of the following values:

  • Unsupported media format: The media format specified in the MediaFormat field of the request isn't valid. See the description of the MediaFormat field for a list of valid values.

  • The media format provided does not match the detected media format: The media format of the audio file doesn't match the format specified in the MediaFormat field in the request. Check the media format of your media file and make sure the two values match.

  • Invalid sample rate for audio file: The sample rate specified in the MediaSampleRateHertz of the request isn't valid. The sample rate must be between 8000 and 48000 Hertz.

  • The sample rate provided does not match the detected sample rate: The sample rate in the audio file doesn't match the sample rate specified in the MediaSampleRateHertz field in the request. Check the sample rate of your media file and make sure that the two values match.

  • Invalid file size: file size too large: The size of your audio file is larger than what Amazon Transcribe Medical can process. For more information, see Guidelines and Quotas in the Amazon Transcribe Medical Guide

  • Invalid number of channels: number of channels too large: Your audio contains more channels than Amazon Transcribe Medical is configured to process. To request additional channels, see Amazon Transcribe Medical Endpoints and Quotas in the Amazon Web Services General Reference.

" + }, + "DataAccessRoleArn":{ + "shape":"DataAccessRoleArn", + "documentation":"

The Amazon Resource Number (ARN) that you use to get access to the analytics job.

" + }, + "IdentifiedLanguageScore":{ + "shape":"IdentifiedLanguageScore", + "documentation":"

A value between zero and one that Amazon Transcribe assigned to the language that it identified in the source audio. This value appears only when you don't provide a single language code. Larger values indicate that Amazon Transcribe has higher confidence in the language that it identified

" + }, + "Settings":{ + "shape":"CallAnalyticsJobSettings", + "documentation":"

Provides information about the settings used to run a transcription job.

" + }, + "ChannelDefinitions":{ + "shape":"ChannelDefinitions", + "documentation":"

Shows numeric values to indicate the channel assigned to the agent's audio and the channel assigned to the customer's audio.

" + } + }, + "documentation":"

Describes an asynchronous analytics job that was created with the StartCallAnalyticsJob operation.

" + }, + "CallAnalyticsJobName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z._-]+" + }, + "CallAnalyticsJobSettings":{ + "type":"structure", + "members":{ + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of a vocabulary to use when processing the call analytics job.

" + }, + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter to use when running a call analytics job. The filter that you specify must have the same language code as the analytics job.

" + }, + "VocabularyFilterMethod":{ + "shape":"VocabularyFilterMethod", + "documentation":"

Set to mask to remove filtered text from the transcript and replace it with three asterisks (\"***\") as placeholder text. Set to remove to remove filtered text from the transcript without using placeholder text. Set to tag to mark the word in the transcription output that matches the vocabulary filter. When you set the filter method to tag, the words matching your vocabulary filter are not masked or removed.

" + }, + "LanguageModelName":{ + "shape":"ModelName", + "documentation":"

The structure used to describe a custom language model.

" + }, + "ContentRedaction":{"shape":"ContentRedaction"}, + "LanguageOptions":{ + "shape":"LanguageOptions", + "documentation":"

When you run a call analytics job, you can specify the language spoken in the audio, or you can have Amazon Transcribe identify the language for you.

To specify a language, specify an array with one language code. If you don't know the language, you can leave this field blank and Amazon Transcribe will use machine learning to identify the language for you. To improve the ability of Amazon Transcribe to correctly identify the language, you can provide an array of the languages that can be present in the audio.

The following list shows the supported languages and corresponding language codes for call analytics jobs:

  • Gulf Arabic (ar-AE)

  • Mandarin Chinese, Mainland (zh-CN)

  • Australian English (en-AU)

  • British English (en-GB)

  • Indian English (en-IN)

  • Irish English (en-IE)

  • Scottish English (en-AB)

  • US English (en-US)

  • Welsh English (en-WL)

  • Spanish (es-ES)

  • US Spanish (es-US)

  • French (fr-FR)

  • Canadian French (fr-CA)

  • German (de-DE)

  • Swiss German (de-CH)

  • Indian Hindi (hi-IN)

  • Italian (it-IT)

  • Japanese (ja-JP)

  • Korean (ko-KR)

  • Portuguese (pt-PT)

  • Brazilian Portuguese (pt-BR)

" + } + }, + "documentation":"

Provides optional settings for the CallAnalyticsJob operation.

" + }, + "CallAnalyticsJobStatus":{ + "type":"string", + "enum":[ + "QUEUED", + "IN_PROGRESS", + "FAILED", + "COMPLETED" + ] + }, + "CallAnalyticsJobSummaries":{ + "type":"list", + "member":{"shape":"CallAnalyticsJobSummary"} + }, + "CallAnalyticsJobSummary":{ + "type":"structure", + "members":{ + "CallAnalyticsJobName":{ + "shape":"CallAnalyticsJobName", + "documentation":"

The name of the call analytics job.

" + }, + "CreationTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the call analytics job was created.

" + }, + "StartTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job began processing.

" + }, + "CompletionTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job was completed.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the transcript in the source audio file.

" + }, + "CallAnalyticsJobStatus":{ + "shape":"CallAnalyticsJobStatus", + "documentation":"

The status of the call analytics job.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the CallAnalyticsJobStatus is FAILED, a description of the error.

" + } + }, + "documentation":"

Provides summary information about a call analytics job.

" + }, + "CategoryName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z._-]+" + }, + "CategoryProperties":{ + "type":"structure", + "members":{ + "CategoryName":{ + "shape":"CategoryName", + "documentation":"

The name of the call analytics category.

" + }, + "Rules":{ + "shape":"RuleList", + "documentation":"

The rules used to create a call analytics category.

" + }, + "CreateTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the call analytics category was created.

" + }, + "LastUpdateTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the call analytics category was most recently updated.

" + } + }, + "documentation":"

An object that contains the rules and additional information about a call analytics category.

" + }, + "CategoryPropertiesList":{ + "type":"list", + "member":{"shape":"CategoryProperties"} + }, + "ChannelDefinition":{ + "type":"structure", + "members":{ + "ChannelId":{ + "shape":"ChannelId", + "documentation":"

A value that indicates the audio channel.

" + }, + "ParticipantRole":{ + "shape":"ParticipantRole", + "documentation":"

Indicates whether the person speaking on the audio channel is the agent or customer.

" + } + }, + "documentation":"

For a call analytics job, an object that indicates the audio channel that belongs to the agent and the audio channel that belongs to the customer.

" + }, + "ChannelDefinitions":{ + "type":"list", + "member":{"shape":"ChannelDefinition"}, + "max":2, + "min":2 + }, + "ChannelId":{ + "type":"integer", + "max":1, + "min":0 + }, "ConflictException":{ "type":"structure", "members":{ @@ -486,6 +847,32 @@ }, "documentation":"

Settings for content redaction within a transcription job.

" }, + "CreateCallAnalyticsCategoryRequest":{ + "type":"structure", + "required":[ + "CategoryName", + "Rules" + ], + "members":{ + "CategoryName":{ + "shape":"CategoryName", + "documentation":"

The name that you choose for your category when you create it.

" + }, + "Rules":{ + "shape":"RuleList", + "documentation":"

To create a category, you must specify between 1 and 20 rules. For each rule, you specify a filter to be applied to the attributes of the call. For example, you can specify a sentiment filter to detect if the customer's sentiment was negative or neutral.

" + } + } + }, + "CreateCallAnalyticsCategoryResponse":{ + "type":"structure", + "members":{ + "CategoryProperties":{ + "shape":"CategoryProperties", + "documentation":"

The rules and associated metadata used to create a category.

" + } + } + }, "CreateLanguageModelRequest":{ "type":"structure", "required":[ @@ -548,7 +935,7 @@ "members":{ "VocabularyName":{ "shape":"VocabularyName", - "documentation":"

The name of the custom vocabulary. This case-sensitive name must be unique within an AWS account. If you try to create a vocabulary with the same name as a previous vocabulary, you get a ConflictException error.

" + "documentation":"

The name of the custom vocabulary. This case-sensitive name must be unique within an Amazon Web Services account. If you try to create a vocabulary with the same name as a previous vocabulary, you get a ConflictException error.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -556,7 +943,7 @@ }, "VocabularyFileUri":{ "shape":"Uri", - "documentation":"

The location in Amazon S3 of the text file you use to define your custom vocabulary. The URI must be in the same AWS Region as the resource that you're calling. Enter information about your VocabularyFileUri in the following format:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

The following is an example URI for a vocabulary file that is stored in Amazon S3:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about Amazon S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Medical Custom Vocabularies.

" + "documentation":"

The location in Amazon S3 of the text file you use to define your custom vocabulary. The URI must be in the same Amazon Web Services Region as the resource that you're calling. Enter information about your VocabularyFileUri in the following format:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

The following is an example URI for a vocabulary file that is stored in Amazon S3:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about Amazon S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Medical Custom Vocabularies.

" } } }, @@ -565,7 +952,7 @@ "members":{ "VocabularyName":{ "shape":"VocabularyName", - "documentation":"

The name of the vocabulary. The name must be unique within an AWS account and is case sensitive.

" + "documentation":"

The name of the vocabulary. The name must be unique within an Amazon Web Services account and is case sensitive.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -636,7 +1023,7 @@ "members":{ "VocabularyName":{ "shape":"VocabularyName", - "documentation":"

The name of the vocabulary. The name must be unique within an AWS account. The name is case sensitive. If you try to create a vocabulary with the same name as a previous vocabulary you will receive a ConflictException error.

" + "documentation":"

The name of the vocabulary. The name must be unique within an Amazon Web Services account. The name is case sensitive. If you try to create a vocabulary with the same name as a previous vocabulary you will receive a ConflictException error.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -648,7 +1035,7 @@ }, "VocabularyFileUri":{ "shape":"Uri", - "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" + "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" } } }, @@ -684,6 +1071,36 @@ "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso-{0,1}[a-z]{0,1}):iam::[0-9]{0,63}:role/[A-Za-z0-9:_/+=,@.-]{0,1024}$" }, "DateTime":{"type":"timestamp"}, + "DeleteCallAnalyticsCategoryRequest":{ + "type":"structure", + "required":["CategoryName"], + "members":{ + "CategoryName":{ + "shape":"CategoryName", + "documentation":"

The name of the call analytics category that you're choosing to delete. The value is case sensitive.

" + } + } + }, + "DeleteCallAnalyticsCategoryResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteCallAnalyticsJobRequest":{ + "type":"structure", + "required":["CallAnalyticsJobName"], + "members":{ + "CallAnalyticsJobName":{ + "shape":"CallAnalyticsJobName", + "documentation":"

The name of the call analytics job you want to delete.

" + } + } + }, + "DeleteCallAnalyticsJobResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteLanguageModelRequest":{ "type":"structure", "required":["ModelName"], @@ -764,6 +1181,44 @@ } }, "FailureReason":{"type":"string"}, + "GetCallAnalyticsCategoryRequest":{ + "type":"structure", + "required":["CategoryName"], + "members":{ + "CategoryName":{ + "shape":"CategoryName", + "documentation":"

The name of the category you want information about. This value is case sensitive.

" + } + } + }, + "GetCallAnalyticsCategoryResponse":{ + "type":"structure", + "members":{ + "CategoryProperties":{ + "shape":"CategoryProperties", + "documentation":"

The rules you've defined for a category.

" + } + } + }, + "GetCallAnalyticsJobRequest":{ + "type":"structure", + "required":["CallAnalyticsJobName"], + "members":{ + "CallAnalyticsJobName":{ + "shape":"CallAnalyticsJobName", + "documentation":"

The name of the analytics job you want information about. This value is case sensitive.

" + } + } + }, + "GetCallAnalyticsJobResponse":{ + "type":"structure", + "members":{ + "CallAnalyticsJob":{ + "shape":"CallAnalyticsJob", + "documentation":"

An object that contains the results of your call analytics job.

" + } + } + }, "GetMedicalTranscriptionJobRequest":{ "type":"structure", "required":["MedicalTranscriptionJobName"], @@ -943,6 +1398,32 @@ "exception":true, "fault":true }, + "InterruptionFilter":{ + "type":"structure", + "members":{ + "Threshold":{ + "shape":"TimestampMilliseconds", + "documentation":"

The duration of the interruption.

" + }, + "ParticipantRole":{ + "shape":"ParticipantRole", + "documentation":"

Indicates whether the caller or customer was interrupting.

" + }, + "AbsoluteTimeRange":{ + "shape":"AbsoluteTimeRange", + "documentation":"

An object you can use to specify a time range (in milliseconds) for when you'd want to find the interruption. For example, you could search for an interruption between the 30,000 millisecond mark and the 45,000 millisecond mark. You could also specify the time period as the first 15,000 milliseconds or the last 15,000 milliseconds.

" + }, + "RelativeTimeRange":{ + "shape":"RelativeTimeRange", + "documentation":"

An object that allows percentages to specify the proportion of the call where there was a interruption. For example, you can specify the first half of the call. You can also specify the period of time between halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.

" + }, + "Negate":{ + "shape":"Boolean", + "documentation":"

Set to TRUE to look for a time period where there was no interruption.

" + } + }, + "documentation":"

An object that enables you to configure your category to be applied to call analytics jobs where either the customer or agent was interrupted.

" + }, "JobExecutionSettings":{ "type":"structure", "members":{ @@ -1049,7 +1530,7 @@ "LanguageOptions":{ "type":"list", "member":{"shape":"LanguageCode"}, - "min":2 + "min":1 }, "LimitExceededException":{ "type":"structure", @@ -1059,6 +1540,70 @@ "documentation":"

Either you have sent too many requests or your input file is too long. Wait before you resend your request, or use a smaller file and resend the request.

", "exception":true }, + "ListCallAnalyticsCategoriesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

When included, NextToken fetches the next set of categories if the result of the previous request was truncated.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of categories to return in the response. If there are fewer results in the list, the response contains only the actual results.

" + } + } + }, + "ListCallAnalyticsCategoriesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The operation returns a page of jobs at a time. The maximum size of the list is set by the MaxResults parameter. If there are more categories in the list than the page size, Amazon Transcribe returns the NextPage token. Include the token in the next request to the operation to return the next page of analytics categories.

" + }, + "Categories":{ + "shape":"CategoryPropertiesList", + "documentation":"

A list of objects containing information about analytics categories.

" + } + } + }, + "ListCallAnalyticsJobsRequest":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"CallAnalyticsJobStatus", + "documentation":"

When specified, returns only call analytics jobs with the specified status. Jobs are ordered by creation date, with the most recent jobs returned first. If you don't specify a status, Amazon Transcribe returns all analytics jobs ordered by creation date.

" + }, + "JobNameContains":{ + "shape":"CallAnalyticsJobName", + "documentation":"

When specified, the jobs returned in the list are limited to jobs whose name contains the specified string.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If you receive a truncated result in the previous request, include NextToken to fetch the next set of jobs.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of call analytics jobs to return in the response. If there are fewer results in the list, this response contains only the actual results.

" + } + } + }, + "ListCallAnalyticsJobsResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"CallAnalyticsJobStatus", + "documentation":"

When specified, returns only call analytics jobs with that status. Jobs are ordered by creation date, with the most recent jobs returned first. If you don't specify a status, Amazon Transcribe returns all transcription jobs ordered by creation date.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The operation returns a page of jobs at a time. The maximum size of the page is set by the MaxResults parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the NextPage token. Include the token in your next request to the operation to return next page of jobs.

" + }, + "CallAnalyticsJobSummaries":{ + "shape":"CallAnalyticsJobSummaries", + "documentation":"

A list of objects containing summary information for a transcription job.

" + } + } + }, "ListLanguageModelsRequest":{ "type":"structure", "members":{ @@ -1295,7 +1840,11 @@ "members":{ "MediaFileUri":{ "shape":"Uri", - "documentation":"

The S3 object location of the input media file. The URI must be in the same region as the API endpoint that you are calling. The general form is:

For example:

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

" + "documentation":"

The S3 object location of the input media file. The URI must be in the same region as the API endpoint that you are calling. The general form is:

For example:

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

" + }, + "RedactedMediaFileUri":{ + "shape":"Uri", + "documentation":"

The S3 object location for your redacted output media file. This is only supported for call analytics jobs.

" } }, "documentation":"

Describes the input media file in a transcription request.

" @@ -1348,7 +1897,7 @@ }, "MediaSampleRateHertz":{ "shape":"MediaSampleRateHertz", - "documentation":"

The sample rate, in Hertz, of the source audio containing medical information.

If you don't specify the sample rate, Amazon Transcribe Medical determines it for you. If you choose to specify the sample rate, it must match the rate detected by Amazon Transcribe Medical. In most cases, you should leave the MediaSampleHertz blank and let Amazon Transcribe Medical determine the sample rate.

" + "documentation":"

The sample rate, in Hertz, of the source audio containing medical information.

If you don't specify the sample rate, Amazon Transcribe Medical determines it for you. If you choose to specify the sample rate, it must match the rate detected by Amazon Transcribe Medical. In most cases, you should leave the MedicalMediaSampleHertz blank and let Amazon Transcribe Medical determine the sample rate.

" }, "MediaFormat":{ "shape":"MediaFormat", @@ -1389,7 +1938,7 @@ }, "Type":{ "shape":"Type", - "documentation":"

The type of speech in the transcription job. CONVERSATION is generally used for patient-physician dialogues. DICTATION is the setting for physicians speaking their notes after seeing a patient. For more information, see how-it-works-med

" + "documentation":"

The type of speech in the transcription job. CONVERSATION is generally used for patient-physician dialogues. DICTATION is the setting for physicians speaking their notes after seeing a patient. For more information, see What is Amazon Transcribe Medical?.

" } }, "documentation":"

The data structure that contains the information for a medical transcription job.

" @@ -1511,6 +2060,34 @@ "max":8192, "pattern":".+" }, + "NonEmptyString":{ + "type":"string", + "max":2000, + "min":1, + "pattern":".*\\S.*" + }, + "NonTalkTimeFilter":{ + "type":"structure", + "members":{ + "Threshold":{ + "shape":"TimestampMilliseconds", + "documentation":"

The duration of the period when neither the customer nor agent was talking.

" + }, + "AbsoluteTimeRange":{ + "shape":"AbsoluteTimeRange", + "documentation":"

An object you can use to specify a time range (in milliseconds) for when no one is talking. For example, you could specify a time period between the 30,000 millisecond mark and the 45,000 millisecond mark. You could also specify the time period as the first 15,000 milliseconds or the last 15,000 milliseconds.

" + }, + "RelativeTimeRange":{ + "shape":"RelativeTimeRange", + "documentation":"

An object that allows percentages to specify the proportion of the call where there was silence. For example, you can specify the first half of the call. You can also specify the period of time between halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.

" + }, + "Negate":{ + "shape":"Boolean", + "documentation":"

Set to TRUE to look for a time period when people were talking.

" + } + }, + "documentation":"

An object that enables you to configure your category to be applied to call analytics jobs where either the customer or agent was interrupted.

" + }, "NotFoundException":{ "type":"structure", "members":{ @@ -1537,6 +2114,18 @@ "SERVICE_BUCKET" ] }, + "ParticipantRole":{ + "type":"string", + "enum":[ + "AGENT", + "CUSTOMER" + ] + }, + "Percentage":{ + "type":"integer", + "max":100, + "min":0 + }, "Phrase":{ "type":"string", "max":256, @@ -1558,6 +2147,98 @@ "type":"string", "enum":["PII"] }, + "RelativeTimeRange":{ + "type":"structure", + "members":{ + "StartPercentage":{ + "shape":"Percentage", + "documentation":"

A value that indicates the percentage of the beginning of the time range. To set a relative time range, you must specify a start percentage and an end percentage. For example, if you specify the following values:

  • StartPercentage - 10

  • EndPercentage - 50

This looks at the time range starting from 10% of the way into the call to 50% of the way through the call. For a call that lasts 100,000 milliseconds, this example range would apply from the 10,000 millisecond mark to the 50,000 millisecond mark.

" + }, + "EndPercentage":{ + "shape":"Percentage", + "documentation":"

A value that indicates the percentage of the end of the time range. To set a relative time range, you must specify a start percentage and an end percentage. For example, if you specify the following values:

  • StartPercentage - 10

  • EndPercentage - 50

This looks at the time range starting from 10% of the way into the call to 50% of the way through the call. For a call that lasts 100,000 milliseconds, this example range would apply from the 10,000 millisecond mark to the 50,000 millisecond mark.

" + }, + "First":{ + "shape":"Percentage", + "documentation":"

A range that takes the portion of the call up to the time in milliseconds set by the value that you've specified. For example, if you specify 120000, the time range is set for the first 120,000 milliseconds of the call.

" + }, + "Last":{ + "shape":"Percentage", + "documentation":"

A range that takes the portion of the call from the time in milliseconds set by the value that you've specified to the end of the call. For example, if you specify 120000, the time range is set for the last 120,000 milliseconds of the call.

" + } + }, + "documentation":"

An object that allows percentages to specify the proportion of the call where you would like to apply a filter. For example, you can specify the first half of the call. You can also specify the period of time between halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.

" + }, + "Rule":{ + "type":"structure", + "members":{ + "NonTalkTimeFilter":{ + "shape":"NonTalkTimeFilter", + "documentation":"

A condition for a time period when neither the customer nor the agent was talking.

" + }, + "InterruptionFilter":{ + "shape":"InterruptionFilter", + "documentation":"

A condition for a time period when either the customer or agent was interrupting the other person.

" + }, + "TranscriptFilter":{ + "shape":"TranscriptFilter", + "documentation":"

A condition that catches particular words or phrases based on an exact match. For example, if you set the phrase \"I want to speak to the manager\", only that exact phrase will be returned.

" + }, + "SentimentFilter":{ + "shape":"SentimentFilter", + "documentation":"

A condition that is applied to a particular customer sentiment.

" + } + }, + "documentation":"

A condition in the call between the customer and the agent that you want to filter for.

", + "union":true + }, + "RuleList":{ + "type":"list", + "member":{"shape":"Rule"}, + "max":20, + "min":1 + }, + "SentimentFilter":{ + "type":"structure", + "required":["Sentiments"], + "members":{ + "Sentiments":{ + "shape":"SentimentValueList", + "documentation":"

An array that enables you to specify sentiments for the customer or agent. You can specify one or more values.

" + }, + "AbsoluteTimeRange":{ + "shape":"AbsoluteTimeRange", + "documentation":"

The time range, measured in seconds, of the sentiment.

" + }, + "RelativeTimeRange":{ + "shape":"RelativeTimeRange", + "documentation":"

The time range, set in percentages, that correspond to proportion of the call.

" + }, + "ParticipantRole":{ + "shape":"ParticipantRole", + "documentation":"

A value that determines whether the sentiment belongs to the customer or the agent.

" + }, + "Negate":{ + "shape":"Boolean", + "documentation":"

Set to TRUE to look for sentiments that weren't specified in the request.

" + } + }, + "documentation":"

An object that enables you to specify a particular customer or agent sentiment. If at least 50 percent of the conversation turns (the back-and-forth between two speakers) in a specified time period match the specified sentiment, Amazon Transcribe will consider the sentiment a match.

" + }, + "SentimentValue":{ + "type":"string", + "enum":[ + "POSITIVE", + "NEGATIVE", + "NEUTRAL", + "MIXED" + ] + }, + "SentimentValueList":{ + "type":"list", + "member":{"shape":"SentimentValue"}, + "min":1 + }, "Settings":{ "type":"structure", "members":{ @@ -1600,6 +2281,50 @@ "type":"string", "enum":["PRIMARYCARE"] }, + "StartCallAnalyticsJobRequest":{ + "type":"structure", + "required":[ + "CallAnalyticsJobName", + "Media", + "DataAccessRoleArn" + ], + "members":{ + "CallAnalyticsJobName":{ + "shape":"CallAnalyticsJobName", + "documentation":"

The name of the call analytics job. You can't use the string \".\" or \"..\" by themselves as the job name. The name must also be unique within an AWS account. If you try to create a call analytics job with the same name as a previous call analytics job, you get a ConflictException error.

" + }, + "Media":{"shape":"Media"}, + "OutputLocation":{ + "shape":"Uri", + "documentation":"

The Amazon S3 location where the output of the call analytics job is stored. You can provide the following location types to store the output of call analytics job:

  • s3://DOC-EXAMPLE-BUCKET1

    If you specify a bucket, Amazon Transcribe saves the output of the analytics job as a JSON file at the root level of the bucket.

  • s3://DOC-EXAMPLE-BUCKET1/folder/

    If you specify a path, Amazon Transcribe saves the output of the analytics job as s3://DOC-EXAMPLE-BUCKET1/folder/your-transcription-job-name.json

    If you specify a folder, you must provide a trailing slash.

  • s3://DOC-EXAMPLE-BUCKET1/folder/filename.json

    If you provide a path that has the filename specified, Amazon Transcribe saves the output of the analytics job as s3://DOC-EXAMPLE-BUCKET1/folder/filename.json

You can specify an AWS Key Management Service key to encrypt the output of your analytics job using the OutputEncryptionKMSKeyId parameter. If you don't specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for server-side encryption of the analytics job output that is placed in your S3 bucket.

" + }, + "OutputEncryptionKMSKeyId":{ + "shape":"KMSKeyId", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service key used to encrypt the output of the call analytics job. The user calling the operation must have permission to use the specified KMS key.

You use either of the following to identify an AWS KMS key in the current account:

  • KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"

  • KMS Key Alias: \"alias/ExampleAlias\"

You can use either of the following to identify a KMS key in the current account or another account:

  • Amazon Resource Name (ARN) of a KMS key in the current account or another account: \"arn:aws:kms:region:account ID:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

  • ARN of a KMS Key Alias: \"arn:aws:kms:region:account ID:alias/ExampleAlias\"

If you don't specify an encryption key, the output of the call analytics job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputLocation parameter.

" + }, + "DataAccessRoleArn":{ + "shape":"DataAccessRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role that has access to the S3 bucket that contains your input files. Amazon Transcribe assumes this role to read queued audio files. If you have specified an output S3 bucket for your transcription results, this role should have access to the output bucket as well.

" + }, + "Settings":{ + "shape":"CallAnalyticsJobSettings", + "documentation":"

A Settings object that provides optional settings for a call analytics job.

" + }, + "ChannelDefinitions":{ + "shape":"ChannelDefinitions", + "documentation":"

When you start a call analytics job, you must pass an array that maps the agent and the customer to specific audio channels. The values you can assign to a channel are 0 and 1. The agent and the customer must each have their own channel. You can't assign more than one channel to an agent or customer.

" + } + } + }, + "StartCallAnalyticsJobResponse":{ + "type":"structure", + "members":{ + "CallAnalyticsJob":{ + "shape":"CallAnalyticsJob", + "documentation":"

An object containing the details of the asynchronous call analytics job.

" + } + } + }, "StartMedicalTranscriptionJobRequest":{ "type":"structure", "required":[ @@ -1613,7 +2338,7 @@ "members":{ "MedicalTranscriptionJobName":{ "shape":"TranscriptionJobName", - "documentation":"

The name of the medical transcription job. You can't use the strings \".\" or \"..\" by themselves as the job name. The name must also be unique within an AWS account. If you try to create a medical transcription job with the same name as a previous medical transcription job, you get a ConflictException error.

" + "documentation":"

The name of the medical transcription job. You can't use the strings \".\" or \"..\" by themselves as the job name. The name must also be unique within an Amazon Web Services account. If you try to create a medical transcription job with the same name as a previous medical transcription job, you get a ConflictException error.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -1630,7 +2355,7 @@ "Media":{"shape":"Media"}, "OutputBucketName":{ "shape":"OutputBucketName", - "documentation":"

The Amazon S3 location where the transcription is stored.

You must set OutputBucketName for Amazon Transcribe Medical to store the transcription results. Your transcript appears in the S3 location you specify. When you call the GetMedicalTranscriptionJob, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe Medical to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

You can specify an AWS Key Management Service (KMS) key to encrypt the output of your transcription using the OutputEncryptionKMSKeyId parameter. If you don't specify a KMS key, Amazon Transcribe Medical uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket.

" + "documentation":"

The Amazon S3 location where the transcription is stored.

You must set OutputBucketName for Amazon Transcribe Medical to store the transcription results. Your transcript appears in the S3 location you specify. When you call the GetMedicalTranscriptionJob, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe Medical to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

You can specify an Amazon Web Services Key Management Service (KMS) key to encrypt the output of your transcription using the OutputEncryptionKMSKeyId parameter. If you don't specify a KMS key, Amazon Transcribe Medical uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket.

" }, "OutputKey":{ "shape":"OutputKey", @@ -1638,7 +2363,7 @@ }, "OutputEncryptionKMSKeyId":{ "shape":"KMSKeyId", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key used to encrypt the output of the transcription job. The user calling the StartMedicalTranscriptionJob operation must have permission to use the specified KMS key.

You use either of the following to identify a KMS key in the current account:

  • KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"

  • KMS Key Alias: \"alias/ExampleAlias\"

You can use either of the following to identify a KMS key in the current account or another account:

  • Amazon Resource Name (ARN) of a KMS key in the current account or another account: \"arn:aws:kms:region:account ID:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

  • ARN of a KMS Key Alias: \"arn:aws:kms:region:account ID:alias/ExampleAlias\"

If you don't specify an encryption key, the output of the medical transcription job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputBucketName parameter.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Key Management Service (KMS) key used to encrypt the output of the transcription job. The user calling the StartMedicalTranscriptionJob operation must have permission to use the specified KMS key.

You use either of the following to identify a KMS key in the current account:

  • KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"

  • KMS Key Alias: \"alias/ExampleAlias\"

You can use either of the following to identify a KMS key in the current account or another account:

  • Amazon Resource Name (ARN) of a KMS key in the current account or another account: \"arn:aws:kms:region:account ID:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

  • ARN of a KMS Key Alias: \"arn:aws:kms:region:account ID:alias/ExampleAlias\"

If you don't specify an encryption key, the output of the medical transcription job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputBucketName parameter.

" }, "Settings":{ "shape":"MedicalTranscriptionSetting", @@ -1676,7 +2401,7 @@ "members":{ "TranscriptionJobName":{ "shape":"TranscriptionJobName", - "documentation":"

The name of the job. You can't use the strings \".\" or \"..\" by themselves as the job name. The name must also be unique within an AWS account. If you try to create a transcription job with the same name as a previous transcription job, you get a ConflictException error.

" + "documentation":"

The name of the job. You can't use the strings \".\" or \"..\" by themselves as the job name. The name must also be unique within an Amazon Web Services account. If you try to create a transcription job with the same name as a previous transcription job, you get a ConflictException error.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -1696,7 +2421,7 @@ }, "OutputBucketName":{ "shape":"OutputBucketName", - "documentation":"

The location where the transcription is stored.

If you set the OutputBucketName, Amazon Transcribe puts the transcript in the specified S3 bucket. When you call the GetTranscriptionJob operation, the operation returns this location in the TranscriptFileUri field. If you enable content redaction, the redacted transcript appears in RedactedTranscriptFileUri. If you enable content redaction and choose to output an unredacted transcript, that transcript's location still appears in the TranscriptFileUri. The S3 bucket must have permissions that allow Amazon Transcribe to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

You can specify an AWS Key Management Service (KMS) key to encrypt the output of your transcription using the OutputEncryptionKMSKeyId parameter. If you don't specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket.

If you don't set the OutputBucketName, Amazon Transcribe generates a pre-signed URL, a shareable URL that provides secure access to your transcription, and returns it in the TranscriptFileUri field. Use this URL to download the transcription.

" + "documentation":"

The location where the transcription is stored.

If you set the OutputBucketName, Amazon Transcribe puts the transcript in the specified S3 bucket. When you call the GetTranscriptionJob operation, the operation returns this location in the TranscriptFileUri field. If you enable content redaction, the redacted transcript appears in RedactedTranscriptFileUri. If you enable content redaction and choose to output an unredacted transcript, that transcript's location still appears in the TranscriptFileUri. The S3 bucket must have permissions that allow Amazon Transcribe to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

You can specify an Amazon Web Services Key Management Service (KMS) key to encrypt the output of your transcription using the OutputEncryptionKMSKeyId parameter. If you don't specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket.

If you don't set the OutputBucketName, Amazon Transcribe generates a pre-signed URL, a shareable URL that provides secure access to your transcription, and returns it in the TranscriptFileUri field. Use this URL to download the transcription.

" }, "OutputKey":{ "shape":"OutputKey", @@ -1704,7 +2429,7 @@ }, "OutputEncryptionKMSKeyId":{ "shape":"KMSKeyId", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key used to encrypt the output of the transcription job. The user calling the StartTranscriptionJob operation must have permission to use the specified KMS key.

You can use either of the following to identify a KMS key in the current account:

  • KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"

  • KMS Key Alias: \"alias/ExampleAlias\"

You can use either of the following to identify a KMS key in the current account or another account:

  • Amazon Resource Name (ARN) of a KMS Key: \"arn:aws:kms:region:account ID:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

  • ARN of a KMS Key Alias: \"arn:aws:kms:region:account ID:alias/ExampleAlias\"

If you don't specify an encryption key, the output of the transcription job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputBucketName parameter.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Key Management Service (KMS) key used to encrypt the output of the transcription job. The user calling the StartTranscriptionJob operation must have permission to use the specified KMS key.

You can use either of the following to identify a KMS key in the current account:

  • KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"

  • KMS Key Alias: \"alias/ExampleAlias\"

You can use either of the following to identify a KMS key in the current account or another account:

  • Amazon Resource Name (ARN) of a KMS Key: \"arn:aws:kms:region:account ID:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

  • ARN of a KMS Key Alias: \"arn:aws:kms:region:account ID:alias/ExampleAlias\"

If you don't specify an encryption key, the output of the transcription job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputBucketName parameter.

" }, "Settings":{ "shape":"Settings", @@ -1728,7 +2453,7 @@ }, "LanguageOptions":{ "shape":"LanguageOptions", - "documentation":"

An object containing a list of languages that might be present in your collection of audio files. Automatic language identification chooses a language that best matches the source audio from that list.

" + "documentation":"

An object containing a list of languages that might be present in your collection of audio files. Automatic language identification chooses a language that best matches the source audio from that list.

To transcribe speech in Modern Standard Arabic (ar-SA), your audio or video file must be encoded at a sample rate of 16000 Hz or higher.

" } } }, @@ -1742,6 +2467,16 @@ } }, "String":{"type":"string"}, + "StringTargetList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "min":1 + }, + "TimestampMilliseconds":{ + "type":"long", + "max":14400000, + "min":0 + }, "Transcript":{ "type":"structure", "members":{ @@ -1756,6 +2491,44 @@ }, "documentation":"

Identifies the location of a transcription.

" }, + "TranscriptFilter":{ + "type":"structure", + "required":[ + "TranscriptFilterType", + "Targets" + ], + "members":{ + "TranscriptFilterType":{ + "shape":"TranscriptFilterType", + "documentation":"

Matches the phrase to the transcription output in a word for word fashion. For example, if you specify the phrase \"I want to speak to the manager.\" Amazon Transcribe attempts to match that specific phrase to the transcription.

" + }, + "AbsoluteTimeRange":{ + "shape":"AbsoluteTimeRange", + "documentation":"

A time range, set in seconds, between two points in the call.

" + }, + "RelativeTimeRange":{ + "shape":"RelativeTimeRange", + "documentation":"

An object that allows percentages to specify the proportion of the call where you would like to apply a filter. For example, you can specify the first half of the call. You can also specify the period of time between halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.

" + }, + "ParticipantRole":{ + "shape":"ParticipantRole", + "documentation":"

Determines whether the customer or the agent is speaking the phrases that you've specified.

" + }, + "Negate":{ + "shape":"Boolean", + "documentation":"

If TRUE, the rule that you specify is applied to everything except for the phrases that you specify.

" + }, + "Targets":{ + "shape":"StringTargetList", + "documentation":"

The phrases that you're specifying for the transcript filter to match.

" + } + }, + "documentation":"

Matches the output of the transcription to either the specific phrases that you specify, or the intent of the phrases that you specify.

" + }, + "TranscriptFilterType":{ + "type":"string", + "enum":["EXACT"] + }, "TranscriptionJob":{ "type":"structure", "members":{ @@ -1911,6 +2684,32 @@ "DICTATION" ] }, + "UpdateCallAnalyticsCategoryRequest":{ + "type":"structure", + "required":[ + "CategoryName", + "Rules" + ], + "members":{ + "CategoryName":{ + "shape":"CategoryName", + "documentation":"

The name of the analytics category to update. The name is case sensitive. If you try to update a call analytics category with the same name as a previous category you will receive a ConflictException error.

" + }, + "Rules":{ + "shape":"RuleList", + "documentation":"

The rules used for the updated analytics category. The rules that you provide in this field replace the ones that are currently being used.

" + } + } + }, + "UpdateCallAnalyticsCategoryResponse":{ + "type":"structure", + "members":{ + "CategoryProperties":{ + "shape":"CategoryProperties", + "documentation":"

The attributes describing the analytics category. You can see information such as the rules that you've used to update the category and when the category was originally created.

" + } + } + }, "UpdateMedicalVocabularyRequest":{ "type":"structure", "required":[ @@ -1928,7 +2727,7 @@ }, "VocabularyFileUri":{ "shape":"Uri", - "documentation":"

The location in Amazon S3 of the text file that contains the you use for your custom vocabulary. The URI must be in the same AWS Region as the resource that you are calling. The following is the format for a URI:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about Amazon S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies in Amazon Transcribe Medical, see Medical Custom Vocabularies.

" + "documentation":"

The location in Amazon S3 of the text file that contains the vocabulary you use for your custom vocabulary. The URI must be in the same Amazon Web Services Region as the resource that you are calling. The following is the format for a URI:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about Amazon S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies in Amazon Transcribe Medical, see Medical Custom Vocabularies.

" } } }, @@ -2009,7 +2808,7 @@ }, "VocabularyFileUri":{ "shape":"Uri", - "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

For example:

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" + "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

For example:

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" } } }, diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index 374dba47a25e..b2b3eb17ec08 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index abfda87fdc48..95e385c783c2 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/translate/pom.xml b/services/translate/pom.xml index 8a2225163167..43e338ff3795 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 translate diff --git a/services/waf/pom.xml b/services/waf/pom.xml index 368aced81926..d828eef391a3 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index d5948adbe3f3..242fa64855c3 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wafv2/src/main/resources/codegen-resources/service-2.json b/services/wafv2/src/main/resources/codegen-resources/service-2.json index 8c530a70f544..8c0e34a2de44 100644 --- a/services/wafv2/src/main/resources/codegen-resources/service-2.json +++ b/services/wafv2/src/main/resources/codegen-resources/service-2.json @@ -45,7 +45,8 @@ {"shape":"WAFLimitsExceededException"}, {"shape":"WAFInvalidResourceException"}, 
{"shape":"WAFUnavailableEntityException"}, - {"shape":"WAFSubscriptionNotFoundException"} + {"shape":"WAFSubscriptionNotFoundException"}, + {"shape":"WAFExpiredManagedRuleGroupVersionException"} ], "documentation":"

Returns the web ACL capacity unit (WCU) requirements for a specified scope and set of rules. You can use this to check the capacity requirements for the rules you want to use in a RuleGroup or WebACL.

WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

" }, @@ -278,7 +279,8 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidResourceException"}, {"shape":"WAFNonexistentItemException"}, - {"shape":"WAFInvalidOperationException"} + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFExpiredManagedRuleGroupVersionException"} ], "documentation":"

Provides high-level information for a managed rule group, including descriptions of the rules.

" }, @@ -330,6 +332,22 @@ ], "documentation":"

Returns the LoggingConfiguration for the specified web ACL.

" }, + "GetManagedRuleSet":{ + "name":"GetManagedRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetManagedRuleSetRequest"}, + "output":{"shape":"GetManagedRuleSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

Retrieves the specified managed rule set.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

" + }, "GetPermissionPolicy":{ "name":"GetPermissionPolicy", "http":{ @@ -441,6 +459,21 @@ ], "documentation":"

Retrieves the WebACL for the specified resource.

" }, + "ListAvailableManagedRuleGroupVersions":{ + "name":"ListAvailableManagedRuleGroupVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAvailableManagedRuleGroupVersionsRequest"}, + "output":{"shape":"ListAvailableManagedRuleGroupVersionsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

Returns a list of the available versions for the specified managed rule group.

" + }, "ListAvailableManagedRuleGroups":{ "name":"ListAvailableManagedRuleGroups", "http":{ @@ -454,7 +487,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

Retrieves an array of managed rule groups that are available for you to use. This list includes all Amazon Web Services Managed Rules rule groups and the Marketplace managed rule groups that you're subscribed to.

" + "documentation":"

Retrieves an array of managed rule groups that are available for you to use. This list includes all Amazon Web Services Managed Rules rule groups and all of the Marketplace managed rule groups that you're subscribed to.

" }, "ListIPSets":{ "name":"ListIPSets", @@ -486,6 +519,21 @@ ], "documentation":"

Retrieves an array of your LoggingConfiguration objects.

" }, + "ListManagedRuleSets":{ + "name":"ListManagedRuleSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListManagedRuleSetsRequest"}, + "output":{"shape":"ListManagedRuleSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

Retrieves the managed rule sets that you own.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

" + }, "ListRegexPatternSets":{ "name":"ListRegexPatternSets", "http":{ @@ -584,6 +632,23 @@ ], "documentation":"

Enables the specified LoggingConfiguration, to start logging from a web ACL, according to the configuration provided.

You can access information about all traffic that WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the Region that you are operating. If you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Give the data firehose a name that starts with the prefix aws-waf-logs-. For example, aws-waf-logs-us-east-2-analytics.

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the WAF Developer Guide.

This operation completely replaces the mutable specifications that you already have for the logging configuration with the ones that you provide to this call. To modify the logging configuration, retrieve it by calling GetLoggingConfiguration, update the settings as needed, and then provide the complete logging configuration specification to this call.

" }, + "PutManagedRuleSetVersions":{ + "name":"PutManagedRuleSetVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutManagedRuleSetVersionsRequest"}, + "output":{"shape":"PutManagedRuleSetVersionsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

Defines the versions of your managed rule set that you are offering to the customers. Customers see your offerings as managed rule groups with versioning.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

Customers retrieve their managed rule group list by calling ListAvailableManagedRuleGroups. The name that you provide here for your managed rule set is the name the customer sees for the corresponding managed rule group. Customers can retrieve the available versions for a managed rule group by calling ListAvailableManagedRuleGroupVersions. You provide a rule group specification for each version. For each managed rule set, you must specify a version that you recommend using.

To initiate the expiration of a managed rule group version, use UpdateManagedRuleSetVersionExpiryDate.

" + }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", "http":{ @@ -656,6 +721,23 @@ ], "documentation":"

Updates the specified IPSet.

This operation completely replaces the mutable specifications that you already have for the IP set with the ones that you provide to this call. To modify the IP set, retrieve it by calling GetIPSet, update the settings as needed, and then provide the complete IP set specification to this call.

" }, + "UpdateManagedRuleSetVersionExpiryDate":{ + "name":"UpdateManagedRuleSetVersionExpiryDate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateManagedRuleSetVersionExpiryDateRequest"}, + "output":{"shape":"UpdateManagedRuleSetVersionExpiryDateResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

Updates the expiration information for your managed rule set. Use this to initiate the expiration of a managed rule group version. After you initiate expiration for a version, WAF excludes it from the response to ListAvailableManagedRuleGroupVersions for the managed rule group.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

" + }, "UpdateRegexPatternSet":{ "name":"UpdateRegexPatternSet", "http":{ @@ -714,7 +796,8 @@ {"shape":"WAFInvalidResourceException"}, {"shape":"WAFUnavailableEntityException"}, {"shape":"WAFSubscriptionNotFoundException"}, - {"shape":"WAFInvalidOperationException"} + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFExpiredManagedRuleGroupVersionException"} ], "documentation":"

Updates the specified WebACL.

This operation completely replaces the mutable specifications that you already have for the web ACL with the ones that you provide to this call. To modify the web ACL, retrieve it by calling GetWebACL, update the settings as needed, and then provide the complete web ACL specification to this call.

A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, or an AppSync GraphQL API.

" } @@ -1205,7 +1288,7 @@ }, "IPAddressVersion":{ "shape":"IPAddressVersion", - "documentation":"

Specify IPV4 or IPV6.

" + "documentation":"

The version of the IP addresses, either IPV4 or IPV6.

" }, "Addresses":{ "shape":"IPAddresses", @@ -1485,7 +1568,7 @@ }, "WebACLLockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -1494,7 +1577,7 @@ "members":{ "NextWebACLLockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -1521,7 +1604,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -1583,7 +1666,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -1615,7 +1698,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -1647,7 +1730,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -1675,12 +1758,24 @@ "Scope":{ "shape":"Scope", "documentation":"

Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, or an AppSync GraphQL API.

To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

  • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

  • API and SDKs - For all calls, use the Region endpoint us-east-1.

" + }, + "VersionName":{ + "shape":"VersionKeyString", + "documentation":"

The version of the rule group. You can only use a version that is not scheduled for expiration. If you don't provide this, WAF uses the vendor's default version.

" } } }, "DescribeManagedRuleGroupResponse":{ "type":"structure", "members":{ + "VersionName":{ + "shape":"VersionKeyString", + "documentation":"

The managed rule group's version.

" + }, + "SnsTopicArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service SNS topic that's used to record changes to the managed rule group. You can subscribe to the SNS topic to receive notifications when the managed rule group is modified, such as for new versions and for version expiration. For more information, see the Amazon Simple Notification Service Developer Guide.

" + }, "Capacity":{ "shape":"CapacityUnit", "documentation":"

The web ACL capacity units (WCUs) required for this rule group. WAF uses web ACL capacity units (WCU) to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. WAF calculates capacity differently for each rule type, to reflect each rule's relative cost. Rule group capacity is fixed at creation, so users can plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

" @@ -1974,7 +2069,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -1997,6 +2092,41 @@ } } }, + "GetManagedRuleSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

The name of the managed rule set. You use this, along with the rule set ID, to identify the rule set.

This name is assigned to the corresponding managed rule group, which your customers can access and use.

" + }, + "Scope":{ + "shape":"Scope", + "documentation":"

Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, or an AppSync GraphQL API.

To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

  • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

  • API and SDKs - For all calls, use the Region endpoint us-east-1.

" + }, + "Id":{ + "shape":"EntityId", + "documentation":"

A unique identifier for the managed rule set. The ID is returned in the responses to commands like list. You provide it to operations like get and update.

" + } + } + }, + "GetManagedRuleSetResponse":{ + "type":"structure", + "members":{ + "ManagedRuleSet":{ + "shape":"ManagedRuleSet", + "documentation":"

The managed rule set that you requested.

" + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + } + } + }, "GetPermissionPolicyRequest":{ "type":"structure", "required":["ResourceArn"], @@ -2087,7 +2217,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -2121,7 +2251,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -2224,7 +2354,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -2325,7 +2455,7 @@ }, "IPAddressVersion":{ "shape":"IPAddressVersion", - "documentation":"

Specify IPV4 or IPV6.

" + "documentation":"

The version of the IP addresses, either IPV4 or IPV6.

" }, "Addresses":{ "shape":"IPAddresses", @@ -2393,7 +2523,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" }, "ARN":{ "shape":"ResourceArn", @@ -2420,7 +2550,7 @@ }, "InvalidFallbackBehavior":{ "shape":"BodyParsingFallbackBehavior", - "documentation":"

What WAF should do if it fails to completely parse the JSON body. The options are the following:

  • EVALUATE_AS_STRING - Inspect the body as plain text. WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.

  • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

  • NO_MATCH - Treat the web request as not matching the rule statement.

If you don't provide this setting, WAF parses and evaluates the content only up to the first parsing failure that it encounters.

WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as characters that aren't valid, duplicate keys, truncation, and any content whose root node isn't an object or an array.

WAF parses the JSON in the following examples as two valid key, value pairs:

  • Missing comma: {\"key1\":\"value1\"\"key2\":\"value2\"}

  • Missing colon: {\"key1\":\"value1\",\"key2\"\"value2\"}

  • Extra colons: {\"key1\"::\"value1\",\"key2\"\"value2\"}

" + "documentation":"

What WAF should do if it fails to completely parse the JSON body. The options are the following:

  • EVALUATE_AS_STRING - Inspect the body as plain text. WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.

  • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

  • NO_MATCH - Treat the web request as not matching the rule statement.

If you don't provide this setting, WAF parses and evaluates the content only up to the first parsing failure that it encounters.

WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.

WAF parses the JSON in the following examples as two valid key, value pairs:

  • Missing comma: {\"key1\":\"value1\"\"key2\":\"value2\"}

  • Missing colon: {\"key1\":\"value1\",\"key2\"\"value2\"}

  • Extra colons: {\"key1\"::\"value1\",\"key2\"\"value2\"}

" } }, "documentation":"

The body of a web request, inspected as JSON. The body immediately follows the request headers. This is used in the FieldToMatch specification.

Use the specifications in this object to indicate which parts of the JSON body to inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON that result from the matches that you indicate.

" @@ -2535,6 +2665,49 @@ "type":"list", "member":{"shape":"Label"} }, + "ListAvailableManagedRuleGroupVersionsRequest":{ + "type":"structure", + "required":[ + "VendorName", + "Name", + "Scope" + ], + "members":{ + "VendorName":{ + "shape":"VendorName", + "documentation":"

The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.

" + }, + "Name":{ + "shape":"EntityName", + "documentation":"

The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.

" + }, + "Scope":{ + "shape":"Scope", + "documentation":"

Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, or an AppSync GraphQL API.

To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

  • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

  • API and SDKs - For all calls, use the Region endpoint us-east-1.

" + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

" + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

The maximum number of objects that you want WAF to return for this request. If more objects are available, in the response, WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

" + } + } + }, + "ListAvailableManagedRuleGroupVersionsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

" + }, + "Versions":{ + "shape":"ManagedRuleGroupVersions", + "documentation":"

The versions that are currently available for the specified managed rule group.

" + } + } + }, "ListAvailableManagedRuleGroupsRequest":{ "type":"structure", "required":["Scope"], @@ -2627,6 +2800,37 @@ } } }, + "ListManagedRuleSetsRequest":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, or an AppSync GraphQL API.

To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

  • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

  • API and SDKs - For all calls, use the Region endpoint us-east-1.

" + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

" + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

The maximum number of objects that you want WAF to return for this request. If more objects are available, in the response, WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

" + } + } + }, + "ListManagedRuleSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

" + }, + "ManagedRuleSets":{ + "shape":"ManagedRuleSetSummaries", + "documentation":"

Your managed rule sets.

" + } + } + }, "ListMaxItems":{ "type":"long", "max":500, @@ -2858,6 +3062,10 @@ "shape":"EntityName", "documentation":"

The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.

" }, + "Version":{ + "shape":"VersionKeyString", + "documentation":"

The version of the managed rule group to use. If you specify this, the version setting is fixed until you change it. If you don't specify this, WAF uses the vendor's default version, and then keeps the version at the vendor's default when the vendor updates the managed rule group settings.

" + }, "ExcludedRules":{ "shape":"ExcludedRules", "documentation":"

The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.

" @@ -2891,6 +3099,127 @@ }, "documentation":"

High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, which are free of charge to WAF customers, and Marketplace managed rule groups, which you can subscribe to through Marketplace.

" }, + "ManagedRuleGroupVersion":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"VersionKeyString", + "documentation":"

The version name.

" + }, + "LastUpdateTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time that the managed rule group owner updated the rule group version information.

" + } + }, + "documentation":"

Describes a single version of a managed rule group.

" + }, + "ManagedRuleGroupVersions":{ + "type":"list", + "member":{"shape":"ManagedRuleGroupVersion"} + }, + "ManagedRuleSet":{ + "type":"structure", + "required":[ + "Name", + "Id", + "ARN" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

The name of the managed rule set. You use this, along with the rule set ID, to identify the rule set.

This name is assigned to the corresponding managed rule group, which your customers can access and use.

" + }, + "Id":{ + "shape":"EntityId", + "documentation":"

A unique identifier for the managed rule set. The ID is returned in the responses to commands like list. You provide it to operations like get and update.

" + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the entity.

" + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

A description of the set that helps with identification.

" + }, + "PublishedVersions":{ + "shape":"PublishedVersions", + "documentation":"

The versions of this managed rule set that are available for use by customers.

" + }, + "RecommendedVersion":{ + "shape":"VersionKeyString", + "documentation":"

The version that you would like your customers to use.

" + }, + "LabelNamespace":{ + "shape":"LabelName", + "documentation":"

The label namespace prefix for the managed rule groups that are offered to customers from this managed rule set. All labels that are added by rules in the managed rule group have this prefix.

  • The syntax for the label namespace prefix for a managed rule group is the following:

    awswaf:managed:<vendor>:<rule group name>:

  • When a rule with a label matches a web request, WAF adds the fully qualified label to the request. A fully qualified label is made up of the label namespace from the rule group or web ACL where the rule is defined and the label from the rule, separated by a colon:

    <label namespace>:<label from rule>

" + } + }, + "documentation":"

A set of rules that is managed by Amazon Web Services and Marketplace sellers to provide versioned managed rule groups for customers of WAF.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

" + }, + "ManagedRuleSetSummaries":{ + "type":"list", + "member":{"shape":"ManagedRuleSetSummary"} + }, + "ManagedRuleSetSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

The name of the managed rule set. You use this, along with the rule set ID, to identify the rule set.

This name is assigned to the corresponding managed rule group, which your customers can access and use.

" + }, + "Id":{ + "shape":"EntityId", + "documentation":"

A unique identifier for the managed rule set. The ID is returned in the responses to commands like list. You provide it to operations like get and update.

" + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

A description of the set that helps with identification.

" + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the entity.

" + }, + "LabelNamespace":{ + "shape":"LabelName", + "documentation":"

The label namespace prefix for the managed rule groups that are offered to customers from this managed rule set. All labels that are added by rules in the managed rule group have this prefix.

  • The syntax for the label namespace prefix for a managed rule group is the following:

    awswaf:managed:<vendor>:<rule group name>:

  • When a rule with a label matches a web request, WAF adds the fully qualified label to the request. A fully qualified label is made up of the label namespace from the rule group or web ACL where the rule is defined and the label from the rule, separated by a colon:

    <label namespace>:<label from rule>

" + } + }, + "documentation":"

High-level information for a managed rule set.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

" + }, + "ManagedRuleSetVersion":{ + "type":"structure", + "members":{ + "AssociatedRuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the vendor rule group that's used to define the published version of your managed rule group.

" + }, + "Capacity":{ + "shape":"CapacityUnit", + "documentation":"

The web ACL capacity units (WCUs) required for this rule group.

WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

" + }, + "ForecastedLifetime":{ + "shape":"TimeWindowDay", + "documentation":"

The amount of time you expect this version of your managed rule group to last, in days.

" + }, + "PublishTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time that you first published this version.

Times are in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\".

" + }, + "LastUpdateTimestamp":{ + "shape":"Timestamp", + "documentation":"

The last time that you updated this version.

Times are in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\".

" + }, + "ExpiryTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time that this version is set to expire.

Times are in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\".

" + } + }, + "documentation":"

Information for a single version of a managed rule set.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

" + }, "Method":{ "type":"structure", "members":{ @@ -3035,6 +3364,11 @@ "CONTAINS_WORD" ] }, + "PublishedVersions":{ + "type":"map", + "key":{"shape":"VersionKeyString"}, + "value":{"shape":"ManagedRuleSetVersion"} + }, "PutLoggingConfigurationRequest":{ "type":"structure", "required":["LoggingConfiguration"], @@ -3054,6 +3388,50 @@ } } }, + "PutManagedRuleSetVersionsRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

The name of the managed rule set. You use this, along with the rule set ID, to identify the rule set.

This name is assigned to the corresponding managed rule group, which your customers can access and use.

" + }, + "Scope":{ + "shape":"Scope", + "documentation":"

Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, or an AppSync GraphQL API.

To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

  • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

  • API and SDKs - For all calls, use the Region endpoint us-east-1.

" + }, + "Id":{ + "shape":"EntityId", + "documentation":"

A unique identifier for the managed rule set. The ID is returned in the responses to commands like list. You provide it to operations like get and update.

" + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + }, + "RecommendedVersion":{ + "shape":"VersionKeyString", + "documentation":"

The version of the named managed rule group that you'd like your customers to choose, from among your version offerings.

" + }, + "VersionsToPublish":{ + "shape":"VersionsToPublish", + "documentation":"

The versions of the named managed rule group that you want to offer to your customers.

" + } + } + }, + "PutManagedRuleSetVersionsResponse":{ + "type":"structure", + "members":{ + "NextLockToken":{ + "shape":"LockToken", + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + } + } + }, "PutPermissionPolicyRequest":{ "type":"structure", "required":[ @@ -3118,7 +3496,10 @@ "RateBasedStatementManagedKeysIPSet":{ "type":"structure", "members":{ - "IPAddressVersion":{"shape":"IPAddressVersion"}, + "IPAddressVersion":{ + "shape":"IPAddressVersion", + "documentation":"

The version of the IP addresses, either IPV4 or IPV6.

" + }, "Addresses":{ "shape":"IPAddresses", "documentation":"

The IP addresses that are currently blocked.

" @@ -3216,7 +3597,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" }, "ARN":{ "shape":"ResourceArn", @@ -3423,7 +3804,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" }, "ARN":{ "shape":"ResourceArn", @@ -3744,7 +4125,7 @@ }, "Type":{ "shape":"TextTransformationType", - "documentation":"

You can specify the following transformation types:

BASE64_DECODE - Decode a Base64-encoded string.

BASE64_DECODE_EXT - Decode a Base64-encoded string, but use a forgiving implementation that ignores characters that aren't valid.

CMD_LINE - Command-line transformations. These are helpful in reducing effectiveness of attackers who inject an operating system command-line command and use unusual formatting to disguise some or all of the command.

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE - Replace these characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • Non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

CSS_DECODE - Decode characters that were encoded using CSS 2.x escape rules syndata.html#characters. This function uses up to two bytes in the decoding process, so it can help to uncover ASCII characters that were encoded using CSS encoding that wouldn’t typically be encoded. It's also useful in countering evasion, which is a combination of a backslash and non-hexadecimal characters. For example, ja\\vascript for javascript.

ESCAPE_SEQ_DECODE - Decode the following ANSI C escape sequences: \\a, \\b, \\f, \\n, \\r, \\t, \\v, \\\\, \\?, \\', \\\", \\xHH (hexadecimal), \\0OOO (octal). Encodings that aren't valid remain in the output.

HEX_DECODE - Decode a string of hexadecimal characters into a binary.

HTML_ENTITY_DECODE - Replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs these operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

JS_DECODE - Decode JavaScript escape sequences. If a \\ u HHHH code is in the full-width ASCII code range of FF01-FF5E, then the higher byte is used to detect and adjust the lower byte. If not, only the lower byte is used and the higher byte is zeroed, causing a possible loss of information.

LOWERCASE - Convert uppercase letters (A-Z) to lowercase (a-z).

MD5 - Calculate an MD5 hash from the data in the input. The computed hash is in a raw binary form.

NONE - Specify NONE if you don't want any text transformations.

NORMALIZE_PATH - Remove multiple slashes, directory self-references, and directory back-references that are not at the beginning of the input from an input string.

NORMALIZE_PATH_WIN - This is the same as NORMALIZE_PATH, but first converts backslash characters to forward slashes.

REMOVE_NULLS - Remove all NULL bytes from the input.

REPLACE_COMMENTS - Replace each occurrence of a C-style comment (/* ... */) with a single space. Multiple consecutive occurrences are not compressed. Unterminated comments are also replaced with a space (ASCII 0x20). However, a standalone termination of a comment (*/) is not acted upon.

REPLACE_NULLS - Replace NULL bytes in the input with space characters (ASCII 0x20).

SQL_HEX_DECODE - Decode the following ANSI C escape sequences: \\a, \\b, \\f, \\n, \\r, \\t, \\v, \\\\, \\?, \\', \\\", \\xHH (hexadecimal), \\0OOO (octal). Encodings that aren't valid remain in the output.

URL_DECODE - Decode a URL-encoded value.

URL_DECODE_UNI - Like URL_DECODE, but with support for Microsoft-specific %u encoding. If the code is in the full-width ASCII code range of FF01-FF5E, the higher byte is used to detect and adjust the lower byte. Otherwise, only the lower byte is used and the higher byte is zeroed.

UTF8_TO_UNICODE - Convert all UTF-8 character sequences to Unicode. This helps input normalization, and minimizing false-positives and false-negatives for non-English languages.

" + "documentation":"

You can specify the following transformation types:

BASE64_DECODE - Decode a Base64-encoded string.

BASE64_DECODE_EXT - Decode a Base64-encoded string, but use a forgiving implementation that ignores characters that aren't valid.

CMD_LINE - Command-line transformations. These are helpful in reducing effectiveness of attackers who inject an operating system command-line command and use unusual formatting to disguise some or all of the command.

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE - Replace these characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • Non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

CSS_DECODE - Decode characters that were encoded using CSS 2.x escape rules syndata.html#characters. This function uses up to two bytes in the decoding process, so it can help to uncover ASCII characters that were encoded using CSS encoding that wouldn’t typically be encoded. It's also useful in countering evasion, which is a combination of a backslash and non-hexadecimal characters. For example, ja\\vascript for javascript.

ESCAPE_SEQ_DECODE - Decode the following ANSI C escape sequences: \\a, \\b, \\f, \\n, \\r, \\t, \\v, \\\\, \\?, \\', \\\", \\xHH (hexadecimal), \\0OOO (octal). Encodings that aren't valid remain in the output.

HEX_DECODE - Decode a string of hexadecimal characters into a binary.

HTML_ENTITY_DECODE - Replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs these operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

JS_DECODE - Decode JavaScript escape sequences. If a \\ u HHHH code is in the full-width ASCII code range of FF01-FF5E, then the higher byte is used to detect and adjust the lower byte. If not, only the lower byte is used and the higher byte is zeroed, causing a possible loss of information.

LOWERCASE - Convert uppercase letters (A-Z) to lowercase (a-z).

MD5 - Calculate an MD5 hash from the data in the input. The computed hash is in a raw binary form.

NONE - Specify NONE if you don't want any text transformations.

NORMALIZE_PATH - Remove multiple slashes, directory self-references, and directory back-references that are not at the beginning of the input from an input string.

NORMALIZE_PATH_WIN - This is the same as NORMALIZE_PATH, but first converts backslash characters to forward slashes.

REMOVE_NULLS - Remove all NULL bytes from the input.

REPLACE_COMMENTS - Replace each occurrence of a C-style comment (/* ... */) with a single space. Multiple consecutive occurrences are not compressed. Unterminated comments are also replaced with a space (ASCII 0x20). However, a standalone termination of a comment (*/) is not acted upon.

REPLACE_NULLS - Replace NULL bytes in the input with space characters (ASCII 0x20).

SQL_HEX_DECODE - Decode SQL hex data. Example (0x414243) will be decoded to (ABC).

URL_DECODE - Decode a URL-encoded value.

URL_DECODE_UNI - Like URL_DECODE, but with support for Microsoft-specific %u encoding. If the code is in the full-width ASCII code range of FF01-FF5E, the higher byte is used to detect and adjust the lower byte. Otherwise, only the lower byte is used and the higher byte is zeroed.

UTF8_TO_UNICODE - Convert all UTF-8 character sequences to Unicode. This helps input normalization, and minimizing false-positives and false-negatives for non-English languages.

" } }, "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.

" @@ -3802,6 +4183,10 @@ }, "documentation":"

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want WAF to return a sample of web requests.

You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which WAF actually returned a sample of web requests. WAF gets the specified number of requests from among the first 5,000 requests that your Amazon Web Services resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, WAF stops sampling after the 5,000th request. In that case, EndTime is the time that WAF received the 5,000th request.

" }, + "TimeWindowDay":{ + "type":"integer", + "min":1 + }, "Timestamp":{"type":"timestamp"}, "URIString":{"type":"string"}, "UntagResourceRequest":{ @@ -3858,7 +4243,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -3867,7 +4252,61 @@ "members":{ "NextLockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

" + "documentation":"

A token used for optimistic locking. WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

" + } + } + }, + "UpdateManagedRuleSetVersionExpiryDateRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "LockToken", + "VersionToExpire", + "ExpiryTimestamp" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

The name of the managed rule set. You use this, along with the rule set ID, to identify the rule set.

This name is assigned to the corresponding managed rule group, which your customers can access and use.

" + }, + "Scope":{ + "shape":"Scope", + "documentation":"

Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, or an AppSync GraphQL API.

To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

  • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

  • API and SDKs - For all calls, use the Region endpoint us-east-1.

" + }, + "Id":{ + "shape":"EntityId", + "documentation":"

A unique identifier for the managed rule set. The ID is returned in the responses to commands like list. You provide it to operations like get and update.

" + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + }, + "VersionToExpire":{ + "shape":"VersionKeyString", + "documentation":"

The version that you want to remove from your list of offerings for the named managed rule group.

" + }, + "ExpiryTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time that you want the version to expire.

Times are in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\".

" + } + } + }, + "UpdateManagedRuleSetVersionExpiryDateResponse":{ + "type":"structure", + "members":{ + "ExpiringVersion":{ + "shape":"VersionKeyString", + "documentation":"

The version that is set to expire.

" + }, + "ExpiryTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time that the version will expire.

Times are in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\".

" + }, + "NextLockToken":{ + "shape":"LockToken", + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -3903,7 +4342,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" } } }, @@ -3912,7 +4351,7 @@ "members":{ "NextLockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

" + "documentation":"

A token used for optimistic locking. WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

" } } }, @@ -3952,7 +4391,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" }, "CustomResponseBodies":{ "shape":"CustomResponseBodies", @@ -3965,7 +4404,7 @@ "members":{ "NextLockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

" + "documentation":"

A token used for optimistic locking. WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

" } } }, @@ -4010,7 +4449,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" }, "CustomResponseBodies":{ "shape":"CustomResponseBodies", @@ -4023,7 +4462,7 @@ "members":{ "NextLockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

" + "documentation":"

A token used for optimistic locking. WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

" } } }, @@ -4039,6 +4478,31 @@ "min":1, "pattern":".*\\S.*" }, + "VersionKeyString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\w#:\\.\\-/]+$" + }, + "VersionToPublish":{ + "type":"structure", + "members":{ + "AssociatedRuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the vendor's rule group that's used in the published managed rule group version.

" + }, + "ForecastedLifetime":{ + "shape":"TimeWindowDay", + "documentation":"

The amount of time the vendor expects this version of the managed rule group to last, in days.

" + } + }, + "documentation":"

A version of the named managed rule group, that the rule group's vendor publishes for use by customers.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

" + }, + "VersionsToPublish":{ + "type":"map", + "key":{"shape":"VersionKeyString"}, + "value":{"shape":"VersionToPublish"} + }, "VisibilityConfig":{ "type":"structure", "required":[ @@ -4078,6 +4542,14 @@ "documentation":"

WAF couldn’t perform the operation because the resource that you tried to save is a duplicate of an existing one.

", "exception":true }, + "WAFExpiredManagedRuleGroupVersionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation failed because the specified version for the managed rule group has expired. You can retrieve the available versions for the managed rule group by calling ListAvailableManagedRuleGroupVersions.

", + "exception":true + }, "WAFInternalErrorException":{ "type":"structure", "members":{ @@ -4099,9 +4571,18 @@ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"}, - "Field":{"shape":"ParameterExceptionField"}, - "Parameter":{"shape":"ParameterExceptionParameter"}, - "Reason":{"shape":"ErrorReason"} + "Field":{ + "shape":"ParameterExceptionField", + "documentation":"

The settings where the invalid parameter was found.

" + }, + "Parameter":{ + "shape":"ParameterExceptionParameter", + "documentation":"

The invalid parameter that resulted in the exception.

" + }, + "Reason":{ + "shape":"ErrorReason", + "documentation":"

Additional information about the exception.

" + } }, "documentation":"

The operation failed because WAF didn't recognize a parameter in the request. For example:

  • You specified a parameter name or value that isn't valid.

  • Your nested statement isn't valid. You might have tried to nest a statement that can’t be nested.

  • You tried to update a WebACL with a DefaultAction that isn't among the types available at DefaultAction.

  • Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL can't be associated.

", "exception":true @@ -4127,7 +4608,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

WAF couldn’t perform the operation because you exceeded your resource limit. For example, the maximum number of WebACL objects that you can create for an account. For more information, see Limits in the WAF Developer Guide.

", + "documentation":"

WAF couldn’t perform the operation because you exceeded your resource limit. For example, the maximum number of WebACL objects that you can create for an Amazon Web Services account. For more information, see WAF quotas in the WAF Developer Guide.

", "exception":true }, "WAFNonexistentItemException":{ @@ -4273,7 +4754,7 @@ }, "LockToken":{ "shape":"LockToken", - "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" + "documentation":"

A token used for optimistic locking. WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

" }, "ARN":{ "shape":"ResourceArn", @@ -4301,5 +4782,5 @@ "documentation":"

A rule statement that defines a cross-site scripting (XSS) match search for WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want WAF to search and text transformations to use on the search area before WAF searches for character sequences that are likely to be malicious strings.

" } }, - "documentation":"WAF

This is the latest version of the WAF API, released in November, 2019. The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like \"V2\" or \"v2\", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements.

If you used WAF prior to this release, you can't use this WAFV2 API to access any WAF resources that you created before. You can access your old rules, web ACLs, and other WAF resources only through the WAF Classic APIs. The WAF Classic APIs have retained the prior names, endpoints, and namespaces.

For information, including how to migrate your WAF resources to this version, see the WAF Developer Guide.

WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AppSync GraphQL API. WAF also lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the Amazon API Gateway REST API, CloudFront distribution, the Application Load Balancer, or the AppSync GraphQL API responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You also can configure CloudFront to return a custom error page when a request is blocked.

This API guide is for developers who need detailed information about WAF API actions, data types, and errors. For detailed information about WAF features and an overview of how to use WAF, see the WAF Developer Guide.

You can make calls using the endpoints listed in Amazon Web Services Service Endpoints for WAF.

  • For regional applications, you can use any of the endpoints in the list. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, or an AppSync GraphQL API.

  • For Amazon CloudFront applications, you must use the API endpoint listed for US East (N. Virginia): us-east-1.

Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

We currently provide two versions of the WAF API: this API and the prior versions, the classic WAF APIs. This new API provides the same functionality as the older versions, with the following major improvements:

  • You use one API for both global and regional applications. Where you need to distinguish the scope, you specify a Scope parameter and set it to CLOUDFRONT or REGIONAL.

  • You can define a web ACL or rule group with a single call, and update it with a single call. You define all rule specifications in JSON format, and pass them to your rule group or web ACL calls.

  • The limits WAF places on the use of rules more closely reflects the cost of running each type of rule. Rule groups include capacity settings, so you know the maximum cost of a rule group when you use it.

" + "documentation":"WAF

This is the latest version of the WAF API, released in November, 2019. The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like \"V2\" or \"v2\", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements.

If you used WAF prior to this release, you can't use this WAFV2 API to access any WAF resources that you created before. You can access your old rules, web ACLs, and other WAF resources only through the WAF Classic APIs. The WAF Classic APIs have retained the prior names, endpoints, and namespaces.

For information, including how to migrate your WAF resources to this version, see the WAF Developer Guide.

WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AppSync GraphQL API. WAF also lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the Amazon API Gateway REST API, CloudFront distribution, the Application Load Balancer, or the AppSync GraphQL API responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You also can configure CloudFront to return a custom error page when a request is blocked.

This API guide is for developers who need detailed information about WAF API actions, data types, and errors. For detailed information about WAF features and an overview of how to use WAF, see the WAF Developer Guide.

You can make calls using the endpoints listed in WAF endpoints and quotas.

  • For regional applications, you can use any of the endpoints in the list. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, or an AppSync GraphQL API.

  • For Amazon CloudFront applications, you must use the API endpoint listed for US East (N. Virginia): us-east-1.

Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

We currently provide two versions of the WAF API: this API and the prior versions, the classic WAF APIs. This new API provides the same functionality as the older versions, with the following major improvements:

  • You use one API for both global and regional applications. Where you need to distinguish the scope, you specify a Scope parameter and set it to CLOUDFRONT or REGIONAL.

  • You can define a web ACL or rule group with a single call, and update it with a single call. You define all rule specifications in JSON format, and pass them to your rule group or web ACL calls.

  • The limits WAF places on the use of rules more closely reflects the cost of running each type of rule. Rule groups include capacity settings, so you know the maximum cost of a rule group when you use it.

" } diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index b45b22386015..5a39f7de31cf 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wellarchitected/src/main/resources/codegen-resources/service-2.json b/services/wellarchitected/src/main/resources/codegen-resources/service-2.json index 85712a93cedb..30b24e1ad156 100644 --- a/services/wellarchitected/src/main/resources/codegen-resources/service-2.json +++ b/services/wellarchitected/src/main/resources/codegen-resources/service-2.json @@ -152,7 +152,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Get lens review.

" + "documentation":"

Get the answer to a specific question in a workload review.

" }, "GetLensReview":{ "name":"GetLensReview", @@ -428,7 +428,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes specified tags from a resource.

" + "documentation":"

Deletes specified tags from a resource.

To specify multiple tags, use separate tagKeys parameters, for example:

DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2

" }, "UpdateAnswer":{ "name":"UpdateAnswer", @@ -560,12 +560,30 @@ "HelpfulResourceUrl":{"shape":"HelpfulResourceUrl"}, "Choices":{"shape":"Choices"}, "SelectedChoices":{"shape":"SelectedChoices"}, + "ChoiceAnswers":{ + "shape":"ChoiceAnswers", + "documentation":"

A list of selected choices to a question in your workload.

" + }, "IsApplicable":{"shape":"IsApplicable"}, "Risk":{"shape":"Risk"}, - "Notes":{"shape":"Notes"} + "Notes":{"shape":"Notes"}, + "Reason":{ + "shape":"AnswerReason", + "documentation":"

The reason why the question is not applicable to your workload.

" + } }, "documentation":"

An answer of the question.

" }, + "AnswerReason":{ + "type":"string", + "enum":[ + "OUT_OF_SCOPE", + "BUSINESS_PRIORITIES", + "ARCHITECTURE_CONSTRAINTS", + "OTHER", + "NONE" + ] + }, "AnswerSummaries":{ "type":"list", "member":{"shape":"AnswerSummary"}, @@ -579,8 +597,16 @@ "QuestionTitle":{"shape":"QuestionTitle"}, "Choices":{"shape":"Choices"}, "SelectedChoices":{"shape":"SelectedChoices"}, + "ChoiceAnswerSummaries":{ + "shape":"ChoiceAnswerSummaries", + "documentation":"

A list of selected choices to a question in your workload.

" + }, "IsApplicable":{"shape":"IsApplicable"}, - "Risk":{"shape":"Risk"} + "Risk":{"shape":"Risk"}, + "Reason":{ + "shape":"AnswerReason", + "documentation":"

The reason why a choice is non-applicable to a question in your workload.

" + } }, "documentation":"

An answer summary of a lens review in a workload.

" }, @@ -623,6 +649,48 @@ }, "documentation":"

A choice available to answer question.

" }, + "ChoiceAnswer":{ + "type":"structure", + "members":{ + "ChoiceId":{"shape":"ChoiceId"}, + "Status":{ + "shape":"ChoiceStatus", + "documentation":"

The status of a choice.

" + }, + "Reason":{ + "shape":"ChoiceReason", + "documentation":"

The reason why a choice is non-applicable to a question in your workload.

" + }, + "Notes":{ + "shape":"ChoiceNotes", + "documentation":"

The notes associated with a choice.

" + } + }, + "documentation":"

A choice that has been answered on a question in your workload.

" + }, + "ChoiceAnswerSummaries":{ + "type":"list", + "member":{"shape":"ChoiceAnswerSummary"} + }, + "ChoiceAnswerSummary":{ + "type":"structure", + "members":{ + "ChoiceId":{"shape":"ChoiceId"}, + "Status":{ + "shape":"ChoiceStatus", + "documentation":"

The status of a choice.

" + }, + "Reason":{ + "shape":"ChoiceReason", + "documentation":"

The reason why a choice is non-applicable to a question in your workload.

" + } + }, + "documentation":"

A choice summary that has been answered on a question in your workload.

" + }, + "ChoiceAnswers":{ + "type":"list", + "member":{"shape":"ChoiceAnswer"} + }, "ChoiceDescription":{ "type":"string", "documentation":"

The description of a choice.

", @@ -635,12 +703,58 @@ "max":64, "min":1 }, + "ChoiceNotes":{ + "type":"string", + "max":250 + }, + "ChoiceReason":{ + "type":"string", + "enum":[ + "OUT_OF_SCOPE", + "BUSINESS_PRIORITIES", + "ARCHITECTURE_CONSTRAINTS", + "OTHER", + "NONE" + ] + }, + "ChoiceStatus":{ + "type":"string", + "enum":[ + "SELECTED", + "NOT_APPLICABLE", + "UNSELECTED" + ] + }, "ChoiceTitle":{ "type":"string", "documentation":"

The title of a choice.

", "max":512, "min":1 }, + "ChoiceUpdate":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"ChoiceStatus", + "documentation":"

The status of a choice.

" + }, + "Reason":{ + "shape":"ChoiceReason", + "documentation":"

The reason why a choice is non-applicable to a question in your workload.

" + }, + "Notes":{ + "shape":"ChoiceNotes", + "documentation":"

The notes associated with a choice.

" + } + }, + "documentation":"

A list of choices to be updated.

" + }, + "ChoiceUpdates":{ + "type":"map", + "key":{"shape":"ChoiceId"}, + "value":{"shape":"ChoiceUpdate"} + }, "Choices":{ "type":"list", "member":{"shape":"Choice"}, @@ -1057,7 +1171,7 @@ }, "ImprovementPlanUrl":{ "type":"string", - "documentation":"

The improvement plan URL for a question.

", + "documentation":"

The improvement plan URL for a question.

This value is only available if the question has been answered.

", "max":2048, "min":1 }, @@ -1946,7 +2060,7 @@ }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"

The keys of the tags to be removed.

", + "documentation":"

A list of tag keys. Existing tags of the resource whose keys are members of this list are removed from the resource.

", "location":"querystring", "locationName":"tagKeys" } @@ -1981,8 +2095,16 @@ "locationName":"QuestionId" }, "SelectedChoices":{"shape":"SelectedChoices"}, + "ChoiceUpdates":{ + "shape":"ChoiceUpdates", + "documentation":"

A list of choices to update on a question in your workload. The String key corresponds to the choice ID to be updated.

" + }, "Notes":{"shape":"Notes"}, - "IsApplicable":{"shape":"IsApplicable"} + "IsApplicable":{"shape":"IsApplicable"}, + "Reason":{ + "shape":"AnswerReason", + "documentation":"

The reason why a question is not applicable to your workload.

" + } }, "documentation":"

Input to update answer.

" }, diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 8af1a52b379a..328aeadb35fc 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index 47d7d22bf8e4..36616aac3efe 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index bc3d8c8d5336..0d22a16094e5 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 workmail diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index e454ebf3c075..ad40f4f8e6ba 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 8cb2e8d1f3f0..d8a2ebbb28e6 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/xray/pom.xml b/services/xray/pom.xml index 6e2871efa7e8..649d018fd0fd 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/test/auth-sts-testing/pom.xml b/test/auth-sts-testing/pom.xml new file mode 100644 index 
000000000000..d5abf4ce668f --- /dev/null +++ b/test/auth-sts-testing/pom.xml @@ -0,0 +1,100 @@ + + + + + + aws-sdk-java-pom + software.amazon.awssdk + 2.17.16-SNAPSHOT + ../../pom.xml + + 4.0.0 + + auth-sts-testing + AWS Java SDK :: Test :: Auth Sts Testing + Test package for testing sts and the auth package together. + + + + software.amazon.awssdk + bom-internal + ${project.version} + pom + import + + + + + + + software.amazon.awssdk + auth + ${awsjavasdk.version} + test + + + software.amazon.awssdk + sts + ${awsjavasdk.version} + test + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + test + + + software.amazon.awssdk + profiles + ${awsjavasdk.version} + test + + + software.amazon.awssdk + sdk-core + ${awsjavasdk.version} + test + + + org.assertj + assertj-core + test + + + org.hamcrest + hamcrest-all + test + + + com.github.tomakehurst + wiremock + test + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + test + + + + diff --git a/test/auth-sts-testing/src/it/java/software/amazon/awssdk/authststesting/ProfileCredentialsProviderIntegrationTest.java b/test/auth-sts-testing/src/it/java/software/amazon/awssdk/authststesting/ProfileCredentialsProviderIntegrationTest.java new file mode 100644 index 000000000000..7d56f164fa37 --- /dev/null +++ b/test/auth-sts-testing/src/it/java/software/amazon/awssdk/authststesting/ProfileCredentialsProviderIntegrationTest.java @@ -0,0 +1,88 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.authststesting; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.put; +import static com.github.tomakehurst.wiremock.client.WireMock.putRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider; +import software.amazon.awssdk.core.util.SdkUserAgent; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.services.sts.model.StsException; +import software.amazon.awssdk.utils.DateUtils; + +public class ProfileCredentialsProviderIntegrationTest { + private static final String TOKEN_RESOURCE_PATH = "/latest/api/token"; + private static final String CREDENTIALS_RESOURCE_PATH = "/latest/meta-data/iam/security-credentials/"; + private static final String STUB_CREDENTIALS = "{\"AccessKeyId\":\"ACCESS_KEY_ID\",\"SecretAccessKey\":\"SECRET_ACCESS_KEY\"," + + "\"Expiration\":\"" + DateUtils.formatIso8601Date(Instant.now().plus(Duration.ofDays(1))) + + "\"}"; + + @Test + public void profileWithCredentialSourceUsingEc2InstanceMetadataAndCustomEndpoint_usesEndpointInSourceProfile() { + String testFileContentsTemplate = "" + + "[profile a]\n" + + "role_arn=arn:aws:iam::123456789012:role/testRole3\n" + + "credential_source = ec2instancemetadata\n" + + "ec2_metadata_service_endpoint = http://localhost:%d\n"; + + WireMockServer 
mockMetadataEndpoint = new WireMockServer(WireMockConfiguration.options().dynamicPort()); + mockMetadataEndpoint.start(); + + String profileFileContents = String.format(testFileContentsTemplate, mockMetadataEndpoint.port()); + + ProfileFile profileFile = ProfileFile.builder() + .type(ProfileFile.Type.CONFIGURATION) + .content(new ByteArrayInputStream(profileFileContents.getBytes(StandardCharsets.UTF_8))) + .build(); + + ProfileCredentialsProvider profileCredentialsProvider = ProfileCredentialsProvider.builder() + .profileFile(profileFile) + .profileName("a") + .build(); + + String stubToken = "some-token"; + mockMetadataEndpoint.stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withBody(stubToken))); + mockMetadataEndpoint.stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + mockMetadataEndpoint.stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + try { + profileCredentialsProvider.resolveCredentials(); + + } catch (StsException e) { + // ignored + } finally { + mockMetadataEndpoint.stop(); + } + + String userAgentHeader = "User-Agent"; + String userAgent = SdkUserAgent.create().userAgent(); + mockMetadataEndpoint.verify(putRequestedFor(urlPathEqualTo(TOKEN_RESOURCE_PATH)).withHeader(userAgentHeader, equalTo(userAgent))); + mockMetadataEndpoint.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).withHeader(userAgentHeader, equalTo(userAgent))); + mockMetadataEndpoint.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).withHeader(userAgentHeader, equalTo(userAgent))); + } +} diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index f9dbb9e52aab..260442b234fe 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 
2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json index 63a6d17e22d3..2e34a85c17d1 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json @@ -244,7 +244,8 @@ "ListOfBlobs":{"shape":"ListOfBlobsType"}, "RecursiveStruct":{"shape":"RecursiveStructType"}, "PolymorphicTypeWithSubTypes":{"shape":"BaseType"}, - "PolymorphicTypeWithoutSubTypes":{"shape":"SubTypeOne"} + "PolymorphicTypeWithoutSubTypes":{"shape":"SubTypeOne"}, + "SetPrefixedMember":{"shape":"String"} } }, "BaseType":{ diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index f092891c9ced..0fb038080161 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index 4d18af4d057b..0b34cfd09254 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/module-path-tests/src/main/java/module-info.java b/test/module-path-tests/src/main/java/module-info.java index 489563a381f5..9b2d7c36c49d 100644 --- a/test/module-path-tests/src/main/java/module-info.java +++ b/test/module-path-tests/src/main/java/module-info.java @@ -30,5 +30,4 @@ requires org.slf4j; requires slf4j.simple; - requires com.fasterxml.jackson.core; } diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml 
index 58a435735f58..a311f3f04bbd 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml 4.0.0 @@ -67,11 +67,6 @@ utils ${awsjavasdk.version}
- - software.amazon.ion - ion-java - compile - org.apache.httpcomponents httpclient @@ -133,4 +128,4 @@ jackson-annotations - \ No newline at end of file + diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/HeadersAssertion.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/HeadersAssertion.java index c4eb088fa845..07b3f7ec1ea6 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/HeadersAssertion.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/HeadersAssertion.java @@ -17,6 +17,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import com.github.tomakehurst.wiremock.http.HttpHeaders; import com.github.tomakehurst.wiremock.verification.LoggedRequest; @@ -28,11 +29,11 @@ */ public class HeadersAssertion extends MarshallingAssertion { - private Map contains; + private Map> contains; private List doesNotContain; - public void setContains(Map contains) { + public void setContains(Map> contains) { this.contains = contains; } @@ -51,8 +52,11 @@ protected void doAssert(LoggedRequest actual) throws Exception { } private void assertHeadersContains(HttpHeaders actual) { - contains.entrySet().forEach(e -> { - assertEquals(e.getValue(), actual.getHeader(e.getKey()).firstValue()); + contains.forEach((expectedKey, expectedValues) -> { + assertTrue(String.format("Header '%s' was expected to be present. 
Actual headers: %s", expectedKey, actual), + actual.getHeader(expectedKey).isPresent()); + List actualValues = actual.getHeader(expectedKey).values(); + assertEquals(expectedValues, actualValues); }); } diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/IonBodyAssertion.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/IonBodyAssertion.java deleted file mode 100644 index 84fabc6460cf..000000000000 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/IonBodyAssertion.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocol.asserts.marshalling; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -import com.github.tomakehurst.wiremock.verification.LoggedRequest; -import java.math.BigInteger; -import java.util.Objects; -import software.amazon.ion.Decimal; -import software.amazon.ion.IonReader; -import software.amazon.ion.IonSystem; -import software.amazon.ion.IonType; -import software.amazon.ion.Timestamp; -import software.amazon.ion.system.IonSystemBuilder; - -public class IonBodyAssertion extends MarshallingAssertion { - private static final double DOUBLE_DELTA = 0.0001d; - private static final IonSystem ION_SYSTEM = IonSystemBuilder.standard().build(); - - private final String ionEquals; - - public IonBodyAssertion(String ionEquals) { - this.ionEquals = ionEquals; - } - - @Override - protected void doAssert(LoggedRequest request) throws Exception { - IonReader expected = ION_SYSTEM.newReader(ionEquals); - IonReader actual = ION_SYSTEM.newReader(request.getBody()); - assertIonReaderEquals(expected, actual); - } - - private void assertIonReaderEquals(IonReader x, IonReader y) { - for (int token = 0; ; token++) { - IonType xType = x.next(); - IonType yType = y.next(); - - if (xType == null && yType == null) { - if (x.getDepth() == 0 && y.getDepth() == 0) { - return; - } else { - x.stepOut(); - y.stepOut(); - continue; - } - } - - if (!Objects.equals(xType, yType)) { - fail(String.format("Types (%s, %s) are unequal at token %s", xType, yType, token)); - } - - if (x.isInStruct() && y.isInStruct()) { - String xFieldName = x.getFieldName(); - String yFieldName = y.getFieldName(); - assertEquals( - String.format("Unequal field names (%s, %s) at token %s", xFieldName, yFieldName, token), - xFieldName, - yFieldName); - } - - boolean xNull = x.isNullValue(); - boolean yNull = y.isNullValue(); - if ((xNull && !yNull) || (yNull && !xNull)) { - 
fail(String.format("One value is null but the other is not at token %s", token)); - } else if (xNull && yNull) { - continue; - } - - switch (xType) { - case BLOB: - case CLOB: - int sizeX = x.byteSize(); - int sizeY = y.byteSize(); - assertEquals( - String.format("Unequal LOB sizes (%s, %s) at token %s", sizeX, sizeY, token), - sizeX, - sizeY); - - byte[] bufferX = new byte[sizeX]; - byte[] bufferY = new byte[sizeY]; - - x.getBytes(bufferX, 0, sizeX); - y.getBytes(bufferY, 0, sizeY); - - assertArrayEquals( - String.format("Unequal LOBs at token %s", token), - bufferX, - bufferY); - break; - - case BOOL: - boolean xBoolean = x.booleanValue(); - boolean yBoolean = y.booleanValue(); - assertEquals( - String.format("Unequal boolean values (%s, %s) at token %s", xBoolean, yBoolean, token), - xBoolean, - yBoolean); - break; - - case DECIMAL: - Decimal xDecimal = x.decimalValue(); - Decimal yDecimal = y.decimalValue(); - assertEquals( - String.format("Unequal decimal values (%s, %s) at token %s", xDecimal, yDecimal, token), - xDecimal, - yDecimal); - break; - - case FLOAT: - double xDouble = x.doubleValue(); - double yDouble = y.doubleValue(); - assertEquals( - String.format("Unequal float values (%s, %s) at token %s", xDouble, yDouble, token), - xDouble, - yDouble, - DOUBLE_DELTA); - break; - - case INT: - BigInteger xInteger = x.bigIntegerValue(); - BigInteger yInteger = y.bigIntegerValue(); - assertEquals( - String.format("Unequal integer values (%s, %s) at token %s", xInteger, yInteger, token), - xInteger, - yInteger); - break; - - case NULL: - throw new IllegalStateException("We should never fall through to the IonType.NULL block due to previous " + - "assertions for equal types and nullness"); - - case STRING: - case SYMBOL: - String xString = x.stringValue(); - String yString = y.stringValue(); - assertEquals( - String.format("Unequal string values (%s, %s) at token %s", xString, yString, token), - xString, - yString); - break; - - case LIST: - case SEXP: - case 
STRUCT: - x.stepIn(); - y.stepIn(); - break; - - case TIMESTAMP: - Timestamp xTimestamp = x.timestampValue(); - Timestamp yTimestamp = y.timestampValue(); - assertEquals( - String.format("Unequal timestamp values (%s, %s) at token %s", xTimestamp, yTimestamp, token), - xTimestamp, - yTimestamp); - break; - - default: - fail(String.format("Unrecognized IonType %s", xType)); - } - } - } -} diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/MarshallingAssertion.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/MarshallingAssertion.java index 9ec645a77e6c..767e5e59cf83 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/MarshallingAssertion.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/MarshallingAssertion.java @@ -29,9 +29,11 @@ public abstract class MarshallingAssertion { * @throws AssertionError If any assertions fail */ public final void assertMatches(LoggedRequest actual) throws AssertionError { - // Catches the exception to play nicer with lambda's + // Wrap checked exceptions to play nicer with lambda's try { doAssert(actual); + } catch (Error | RuntimeException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/RequestBodyAssertion.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/RequestBodyAssertion.java index 701643ba2d7a..d93d93dc2149 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/RequestBodyAssertion.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/marshalling/RequestBodyAssertion.java @@ -32,8 +32,4 @@ public void setXmlEquals(String xmlEquals) { public void setEquals(String equals) 
{ addAssertion(new RawBodyAssertion(equals)); } - - public void setIonEquals(String ionEquals) { - addAssertion(new IonBodyAssertion(ionEquals)); - } } diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/GivenResponse.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/GivenResponse.java index bc35f4050be7..5dfbd15e6a81 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/GivenResponse.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/GivenResponse.java @@ -16,13 +16,14 @@ package software.amazon.awssdk.protocol.model; import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.List; import java.util.Map; public class GivenResponse { @JsonProperty(value = "status_code") private Integer statusCode; - private Map headers; + private Map> headers; private String body; public Integer getStatusCode() { @@ -33,11 +34,11 @@ public void setStatusCode(Integer statusCode) { this.statusCode = statusCode; } - public Map getHeaders() { + public Map> getHeaders() { return headers; } - public void setHeaders(Map headers) { + public void setHeaders(Map> headers) { this.headers = headers; } diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/document/JsonNodeToDocumentConvertor.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/document/JsonNodeToDocumentConvertor.java index 391a12b571e9..850f40965f46 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/document/JsonNodeToDocumentConvertor.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/document/JsonNodeToDocumentConvertor.java @@ -23,9 +23,8 @@ import software.amazon.awssdk.core.SdkNumber; import software.amazon.awssdk.core.document.Document; -public class JsonNodeToDocumentConvertor implements JsonNodeVisitor { +public 
class JsonNodeToDocumentConvertor { - @Override public Document visit(JsonNode jsonNode) { if (jsonNode.isObject()) { return visitMap(jsonNode); diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/document/JsonNodeVisitor.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/document/JsonNodeVisitor.java deleted file mode 100644 index 30d3fba4ce36..000000000000 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/document/JsonNodeVisitor.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.protocol.reflect.document; - -import com.fasterxml.jackson.databind.JsonNode; - -public interface JsonNodeVisitor { - - - R visit(JsonNode sdkJsonNode); - -} diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/UnmarshallingTestRunner.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/UnmarshallingTestRunner.java index 7dbd9846b0b4..4560184e88aa 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/UnmarshallingTestRunner.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/UnmarshallingTestRunner.java @@ -99,7 +99,9 @@ private ResponseDefinitionBuilder toResponseBuilder(GivenResponse givenResponse) ResponseDefinitionBuilder responseBuilder = aResponse().withStatus(200); if (givenResponse.getHeaders() != null) { - givenResponse.getHeaders().forEach(responseBuilder::withHeader); + givenResponse.getHeaders().forEach((key, values) -> { + responseBuilder.withHeader(key, values.toArray(new String[0])); + }); } if (givenResponse.getStatusCode() != null) { responseBuilder.withStatus(givenResponse.getStatusCode()); diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/ion-input.ion b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/ion-input.ion deleted file mode 100644 index a102330cf0ab..000000000000 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/ion-input.ion +++ /dev/null @@ -1,345 +0,0 @@ -[ - { - "description": "Scalar Members are marshalled correctly", - "given": { - "input": { - "StringMember": "someVal", - "IntegerMember": 42, - "FloatMember": 1.234, - "DoubleMember": 5.678, - "LongMember": 2147483647 - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": 
"{StringMember:\"someVal\",IntegerMember:42,FloatMember:1.234e0,DoubleMember:5.678e0,LongMember:2147483647 }" - } - } - } - }, - { - "description": "Boolean member with value true is marshalled correctly", - "given": { - "input": { - "BooleanMember": true - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{BooleanMember:true}" - } - } - } - }, - { - "description": "Boolean member with value false is marshalled correctly", - "given": { - "input": { - "BooleanMember": false - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{BooleanMember:false}" - } - } - } - }, - { - "description": "Timestamp member in the payload is marshalled as seconds with millisecond precision", - "given": { - "input": { - "TimestampMember": 1422172801123 - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{TimestampMember: 2015-01-25T08:00:01.123Z}" - } - } - } - }, - { - "description": "Blob member in payload is marshalled as Base64 encoded text", - "given": { - "input": { - "BlobArg": "foo" - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{BlobArg: {{Zm9v}}}" - } - } - } - }, - { - "description": "Nested blob member in payload is marshalled as Base64 encoded text", - "given": { - "input": { - "StructWithNestedBlob": { - "NestedBlob": "foo" - } - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{StructWithNestedBlob: {NestedBlob: {{Zm9v}}}}" - } - } - } - }, - { - "description": "Blob member as map value is marshalled as Base64 encoded text", - "given": { - "input": { - "BlobMap": { - "key1": "foo", - "key2": "bar" - } - } - }, - "when": { - "action": "marshall", - 
"operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{BlobMap: {key1: {{Zm9v}}, key2: {{YmFy}}}}" - } - } - } - }, - { - "description": "Blob as list member is marshalled as Base64 encoded text", - "given": { - "input": { - "ListOfBlobs": [ - "foo", - "bar" - ] - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{ListOfBlobs: [{{Zm9v}}, {{YmFy}}]}" - } - } - } - }, - { - "description": "Recursive structure with recursive member not set is marshalled correctly", - "given": { - "input": { - "RecursiveStruct": { - "NoRecurse": "foo" - } - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{RecursiveStruct: {NoRecurse: \"foo\"}}" - } - } - } - }, - { - "description": "Recursive structure with one level of recursion is marshalled correctly", - "given": { - "input": { - "RecursiveStruct": { - "RecursiveStruct": { - "NoRecurse": "foo" - } - } - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{RecursiveStruct: {RecursiveStruct: {NoRecurse: \"foo\"}}}" - } - } - } - }, - { - "description": "Recursive structure with several levels of recursion is marshalled correctly", - "given": { - "input": { - "RecursiveStruct": { - "RecursiveStruct": { - "RecursiveStruct": { - "RecursiveStruct": { - "NoRecurse": "foo" - } - } - } - } - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{RecursiveStruct: {RecursiveStruct: {RecursiveStruct: {RecursiveStruct: {NoRecurse: \"foo\"}}}}}" - } - } - } - }, - { - "description": "List of recursive structs is marshalled correctly when no recursive members are set", - "given": { - "input": { - "RecursiveStruct": { - "RecursiveList": [ - { - "NoRecurse": "foo" - }, - { - "NoRecurse": 
"bar" - } - ] - } - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{RecursiveStruct: {RecursiveList: [{NoRecurse: \"foo\"}, {NoRecurse: \"bar\"}]}}" - } - } - } - }, - { - "description": "List of recursive structs with one level of recursion is marshalled correctly", - "given": { - "input": { - "RecursiveStruct": { - "RecursiveList": [ - { - "NoRecurse": "foo" - }, - { - "RecursiveStruct": { - "NoRecurse": "bar" - } - } - ] - } - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{RecursiveStruct: {RecursiveList: [{NoRecurse: \"foo\"}, {RecursiveStruct: {NoRecurse: \"bar\"}}]}}" - } - } - } - }, - { - "description": "Recursive structure as map value is marshalled correctly", - "given": { - "input": { - "RecursiveStruct": { - "RecursiveMap": { - "foo": { - "NoRecurse": "foo" - }, - "bar": { - "NoRecurse": "bar", - "RecursiveStruct": { - "NoRecurse": "baz" - } - } - } - } - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{RecursiveStruct: {RecursiveMap: {bar: {NoRecurse: \"bar\", RecursiveStruct: { NoRecurse: \"baz\"}}, foo: {NoRecurse: \"foo\"}}}}" - } - } - } - }, - { - "description": "Empty maps are marshalled correctly", - "given": { - "input": { - "MapOfStringToString": {} - } - }, - "when": { - "action": "marshall", - "operation": "AllTypes" - }, - "then": { - "serializedAs": { - "body": { - "ionEquals": "{MapOfStringToString: {}}" - } - } - } - } -] diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/ion-output.ion b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/ion-output.ion deleted file mode 100644 index 99e94438ff34..000000000000 --- 
a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/ion-output.ion +++ /dev/null @@ -1,309 +0,0 @@ -[ - { - "description": "Scalar Members in payload are unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{StringMember: \"myname\", IntegerMember: 123, FloatMember: 1.2e0, DoubleMember: 1.3e0, LongMember: 200}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "StringMember": "myname", - "IntegerMember": 123, - "FloatMember": 1.2, - "DoubleMember": 1.3, - "LongMember": 200 - } - } - }, - { - "description": "Boolean member with value true in payload is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{BooleanMember: true}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "BooleanMember": true - } - } - }, - { - "description": "Boolean member with value false in payload is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{BooleanMember: false}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "BooleanMember": false - } - } - }, - { - "description": "Base64 encoded blob member is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{BlobArg: {{aGkh}}}}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "BlobArg": "hi!" - } - } - }, - { - "description": "Nested Base64 encoded blob member is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{StructWithNestedBlob: {NestedBlob: {{aGkh}}}}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "StructWithNestedBlob": { - "NestedBlob": "hi!" 
- } - } - } - }, - { - "description": "Base64 encoded blob as list member is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{ListOfBlobs: [{{Zm9v}}, {{YmFy}}]}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "ListOfBlobs": [ - "foo", - "bar" - ] - } - } - }, - { - "description": "Base64 encoded blob as map value is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{BlobMap: {foo: {{dGhlcmUh}}}}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "BlobMap": { - "foo": "there!" - } - } - } - }, - { - "description": "Timestamp member in payload is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{TimestampMember: 2014-04-29T18:30:38.123Z}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "TimestampMember": 1398796238123 - } - } - }, - { - "description": "Nested Timestamp member in payload is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{StructWithNestedTimestampMember: {NestedTimestamp: 2014-04-29T18:30:38.123Z}}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "StructWithNestedTimestampMember": { - "NestedTimestamp": 1398796238123 - } - } - } - }, - { - "description": "List of strings in payload is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{SimpleList: [\"a\", \"b\", null]}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "SimpleList": [ - "a", - "b", - null - ] - } - } - }, - { - "description": "List of structs in payload is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{ListOfStructs: [{StringMember: 
\"foo\"}, null, {}]}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "ListOfStructs": [ - { - "StringMember": "foo" - }, - null, - {} - ] - } - } - }, - { - "description": "List of maps in payload is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{ListOfMaps: [{keyOne: \"valOne\", keyTwo: \"valTwo\"}, null, {}]}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "ListOfMaps": [ - { - "keyOne": "valOne", - "keyTwo": "valTwo" - }, - null, - {} - ] - } - } - }, - { - "description": "Map of String to list of integers is unmarshalled correctly", - "given": { - "response": { - "status_code": 200, - "body": "{MapOfStringToIntegerList: {a: [1, 2], b: [3, 4]}}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - "MapOfStringToIntegerList": { - "a": [ - 1, - 2 - ], - "b": [ - 3, - 4 - ] - } - } - } - }, - { - "description": "Unmodeled data is ignored", - "given": { - "response": { - "status_code": 200, - "body": "{foo: \"bar\"}" - } - }, - "when": { - "action": "unmarshall", - "operation": "AllTypes" - }, - "then": { - "deserializedAs": { - } - } - }, - { - "description": "Operation with no output defined ignores any content in body", - "given": { - "response": { - "status_code": 200, - "body": "THIS ISN'T ION!!!" 
- } - }, - "when": { - "action": "unmarshall", - "operation": "OperationWithNoInputOrOutput" - }, - "then": { - "deserializedAs": { - } - } - } -] diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-input.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-input.json index 0b1fc0e681c2..9bd43b7b9e56 100644 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-input.json +++ b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-input.json @@ -144,6 +144,125 @@ "uri": "/2016-03-11/operationWithGreedyLabel/pathParamValue/foo/bar/baz" } } + }, + { + "description": "ListOfStrings in header is serialized as multi-valued header", + "given": { + "input": { + "StringMember": "singleValue", + "ListOfStringsMember": [ + "listValueOne", + "listValueTwo" + ] + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "contains": { + "x-amz-string": "singleValue", + "x-amz-string-list": [ + "listValueOne", + "listValueTwo" + ] + } + } + } + } + }, + { + "description": "Null string header member is not serialized", + "given": { + "input": { + "StringMember": null + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "doesNotContain": [ "x-amz-string" ] + } + } + } + }, + { + "description": "Null list header member is not serialized", + "given": { + "input": { + "ListOfStringsMember": null + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "doesNotContain": [ "x-amz-string-list" ] + } + } + } + }, + { + "description": 
"List header member with only null value is not serialized", + "given": { + "input": { + "ListOfStringsMember": [ + null + ] + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "doesNotContain": [ "x-amz-string-list" ] + } + } + } + }, + { + "description": "List header member's null elements are not serialized", + "given": { + "input": { + "ListOfStringsMember": [ + "listValueOne", + null + ] + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "contains": { + "x-amz-string-list": [ + "listValueOne" + ] + } + } + } + } } // TODO This is a post process customization for API Gateway // { diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-output.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-output.json index 940ae3e0bcb4..5a6197b2c625 100644 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-output.json +++ b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-output.json @@ -161,5 +161,33 @@ "QueryParamOne": "Found QueryParamOne in the payload! Yay!" 
} } + }, + { + "description": "ListOfStrings in multi-valued header is unmarshalled correctly", + "given": { + "response": { + "status_code": 200, + "headers": { + "x-amz-string": "singleValue", + "x-amz-string-list": [ + "listValueOne", + "listValueTwo" + ] + } + } + }, + "when": { + "action": "unmarshall", + "operation": "MembersInHeaders" + }, + "then": { + "deserializedAs": { + "StringMember": "singleValue", + "ListOfStringsMember": [ + "listValueOne", + "listValueTwo" + ] + } + } } ] \ No newline at end of file diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-xml-input.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-xml-input.json index 7fefb6cf7de1..69eceade6c46 100644 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-xml-input.json +++ b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-xml-input.json @@ -388,6 +388,125 @@ "uri": "/2016-03-11/operationWithGreedyLabel/pathParamValue//foo/bar/baz" } } + }, + { + "description": "ListOfStrings in header is serialized as multi-valued header", + "given": { + "input": { + "StringMember": "singleValue", + "ListOfStringsMember": [ + "listValueOne", + "listValueTwo" + ] + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "contains": { + "x-amz-string": "singleValue", + "x-amz-string-list": [ + "listValueOne", + "listValueTwo" + ] + } + } + } + } + }, + { + "description": "Null string header member is not serialized", + "given": { + "input": { + "StringMember": null + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "doesNotContain": [ "x-amz-string" ] + } + } + } + }, + { + 
"description": "Null list header member is not serialized", + "given": { + "input": { + "ListOfStringsMember": null + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "doesNotContain": [ "x-amz-string-list" ] + } + } + } + }, + { + "description": "List header member with only null value is not serialized", + "given": { + "input": { + "ListOfStringsMember": [ + null + ] + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "doesNotContain": [ "x-amz-string-list" ] + } + } + } + }, + { + "description": "List header member's null elements are not serialized", + "given": { + "input": { + "ListOfStringsMember": [ + "listValueOne", + null + ] + } + }, + "when": { + "action": "marshall", + "operation": "MembersInHeaders" + }, + "then": { + "serializedAs": { + "uri": "/2016-03-11/membersInHeaders", + "headers": { + "contains": { + "x-amz-string-list": [ + "listValueOne" + ] + } + } + } + } } // TODO this is not possible, payloads can only be structures or blobs. 
Only S3 utilizes this // { diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-xml-output.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-xml-output.json index 72cc4a9081a9..7a36f628a32a 100644 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-xml-output.json +++ b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-xml-output.json @@ -334,5 +334,33 @@ } } } + }, + { + "description": "ListOfStrings in multi-valued header is unmarshalled correctly", + "given": { + "response": { + "status_code": 200, + "headers": { + "x-amz-string": "singleValue", + "x-amz-string-list": [ + "listValueOne", + "listValueTwo" + ] + } + } + }, + "when": { + "action": "unmarshall", + "operation": "MembersInHeaders" + }, + "then": { + "deserializedAs": { + "StringMember": "singleValue", + "ListOfStringsMember": [ + "listValueOne", + "listValueTwo" + ] + } + } } ] diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/ion-suite.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/ion-suite.json deleted file mode 100644 index ba57b4451afc..000000000000 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/ion-suite.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "testCases": [ - "cases/ion-input.ion", - "cases/ion-output.ion" - ] -} diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 8e53ef1a344a..ab2f2ba315bd 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/src/main/resources/codegen-resources/restjson/service-2.json 
b/test/protocol-tests/src/main/resources/codegen-resources/restjson/service-2.json index 72596a8d6e5c..6701ab2fbb17 100644 --- a/test/protocol-tests/src/main/resources/codegen-resources/restjson/service-2.json +++ b/test/protocol-tests/src/main/resources/codegen-resources/restjson/service-2.json @@ -505,6 +505,11 @@ "location":"header", "locationName":"x-amz-string" }, + "ListOfStringsMember":{ + "shape":"ListOfStrings", + "location":"header", + "locationName":"x-amz-string-list" + }, "BooleanMember":{ "shape":"Boolean", "location":"header", diff --git a/test/protocol-tests/src/main/resources/codegen-resources/restxml/service-2.json b/test/protocol-tests/src/main/resources/codegen-resources/restxml/service-2.json index cac67bf99cae..b95077656e46 100644 --- a/test/protocol-tests/src/main/resources/codegen-resources/restxml/service-2.json +++ b/test/protocol-tests/src/main/resources/codegen-resources/restxml/service-2.json @@ -268,6 +268,11 @@ "location":"header", "locationName":"x-amz-string" }, + "ListOfStringsMember":{ + "shape":"ListOfStrings", + "location":"header", + "locationName":"x-amz-string-list" + }, "BooleanMember":{ "shape":"Boolean", "location":"header", diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml index 56a79a565e9e..4e099250fe6d 100644 --- a/test/s3-benchmarks/pom.xml +++ b/test/s3-benchmarks/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index 19d8514a74fb..ba042aba9376 100644 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index c073ca80921f..1b5513445455 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ 
aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml 4.0.0 @@ -173,4 +173,4 @@ - \ No newline at end of file + diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index d4b9ffd9fd46..b9b2ad75986e 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index abedc112bc02..a954432cce26 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml 4.0.0 @@ -239,4 +239,4 @@ - \ No newline at end of file + diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index 94ee4940c926..2e9a3ef70e51 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 5ccbcc0be9d1..f3e9b3a132ca 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT ../../pom.xml 4.0.0 @@ -42,6 +42,11 @@ software.amazon.awssdk ${awsjavasdk.version} + + json-utils + software.amazon.awssdk + ${awsjavasdk.version} + aws-core software.amazon.awssdk @@ -72,11 +77,6 @@ software.amazon.awssdk ${awsjavasdk.version} - - aws-ion-protocol - software.amazon.awssdk - ${awsjavasdk.version} - aws-json-protocol software.amazon.awssdk diff --git a/third-party/pom.xml b/third-party/pom.xml new file mode 100644 index 000000000000..4e64c8d9f4a4 --- /dev/null +++ b/third-party/pom.xml @@ -0,0 +1,66 @@ + + + + + 4.0.0 + + aws-sdk-java-pom + 
software.amazon.awssdk + 2.17.16-SNAPSHOT + + + third-party + AWS Java SDK :: Third Party + pom + + The AWS SDK for Java - Third Party is an umbrella module that contains child modules which are shaded third- + party dependencies. + + + + third-party-jackson-core + third-party-jackson-dataformat-cbor + + + + + + software.amazon.awssdk + bom-internal + ${project.version} + pom + import + + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + true + + + + + + diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml new file mode 100644 index 000000000000..809e1cfa199d --- /dev/null +++ b/third-party/third-party-jackson-core/pom.xml @@ -0,0 +1,142 @@ + + + + + + third-party + software.amazon.awssdk + 2.17.16-SNAPSHOT + + 4.0.0 + + third-party-jackson-core + AWS Java SDK :: Third Party :: Jackson-core + https://aws.amazon.com/sdkforjava + + + + + software.amazon.awssdk + bom-internal + ${project.version} + pom + import + + + + + + + + com.fasterxml.jackson.core + jackson-core + ${jackson.version} + true + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 3.1.0 + + + + + com.fasterxml.jackson.core:* + + + + + com.fasterxml.jackson.core + software.amazon.awssdk.thirdparty.jackson.core + + + + + + false + true + ${project.build.directory}/dependency-reduced-pom.xml + + + + package + + shade + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + compile + package + + attach-artifact + + + + + ${basedir}/target/aws-sdk-java-third-party-jackson-core-${awsjavasdk.version}.jar + jar + optional + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.thirdparty.jackson.core + + + + + + maven-javadoc-plugin + + + javadoc-jar + package + + jar + + + + + ${basedir}/../../core/profiles/src/main/java/software/amazon/awssdk/profiles;${basedir}/../../core/sdk-core/src/main/java/software/amazon/awssdk/core + false + + + + + diff --git 
a/third-party/third-party-jackson-dataformat-cbor/pom.xml b/third-party/third-party-jackson-dataformat-cbor/pom.xml new file mode 100644 index 000000000000..5571eb4cf2cd --- /dev/null +++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml @@ -0,0 +1,161 @@ + + + + + + third-party + software.amazon.awssdk + 2.17.16-SNAPSHOT + + 4.0.0 + + third-party-jackson-dataformat-cbor + AWS Java SDK :: Third Party :: Jackson-dataformat-cbor + https://aws.amazon.com/sdkforjava + + + + + software.amazon.awssdk + bom-internal + ${project.version} + pom + import + + + + + + + software.amazon.awssdk + third-party-jackson-core + ${awsjavasdk.version} + + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-cbor + ${jackson.version} + true + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 3.1.0 + + + + + com.fasterxml.jackson.dataformat:* + + + + + com.fasterxml.jackson.dataformat.cbor + software.amazon.awssdk.thirdparty.jackson.dataformat.cbor + + + com.fasterxml.jackson.core + software.amazon.awssdk.thirdparty.jackson.core + + + + + com.fasterxml.jackson.dataformat:* + + + com/fasterxml/jackson/dataformat/cbor/databind/** + + + + + + + false + true + ${project.build.directory}/dependency-reduced-pom.xml + + + + package + + shade + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + compile + package + + attach-artifact + + + + + ${basedir}/target/aws-sdk-java-third-party-jackson-dataformat-cbor-${project.version}.jar + jar + optional + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.thirdparty.jackson.dataformat.cbor + + + + + + maven-javadoc-plugin + + + javadoc-jar + package + + jar + + + + + ${basedir}/../../core/profiles/src/main/java/software/amazon/awssdk/profiles;${basedir}/../../core/sdk-core/src/main/java/software/amazon/awssdk/core + false + + + + + diff --git a/utils/pom.xml b/utils/pom.xml index 623856ebd689..70c92e4164c6 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ 
aws-sdk-java-pom software.amazon.awssdk - 2.16.101-SNAPSHOT + 2.17.16-SNAPSHOT 4.0.0 diff --git a/utils/src/main/java/software/amazon/awssdk/utils/SystemSetting.java b/utils/src/main/java/software/amazon/awssdk/utils/SystemSetting.java index c5e3dff8df86..b2ebbb0b6fab 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/SystemSetting.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/SystemSetting.java @@ -55,6 +55,21 @@ default Optional getStringValue() { return SystemSettingUtils.resolveSetting(this); } + /** + * Attempt to load a system setting from {@link System#getProperty(String)} and {@link System#getenv(String)}. This should be + * used in favor of those methods because the SDK should support both methods of configuration. + * + * {@link System#getProperty(String)} takes precedent over {@link System#getenv(String)} if both are specified. + *

+ * Similar to {@link #getStringValue()}, but does not fall back to the default value. + * + * @return The requested setting, or {@link Optional#empty()} if the values were not set, or the security manager did not + * allow reading the setting. + */ + default Optional getNonDefaultStringValue() { + return SystemSettingUtils.resolveNonDefaultSetting(this); + } + /** * Load the requested system setting as per the documentation in {@link #getStringValue()}, throwing an exception if the value * was not set and had no default. diff --git a/utils/src/main/java/software/amazon/awssdk/utils/internal/SystemSettingUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/internal/SystemSettingUtils.java index 5efc17d8c72d..7b13ada07f33 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/internal/SystemSettingUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/internal/SystemSettingUtils.java @@ -46,6 +46,21 @@ public static Optional resolveSetting(SystemSetting setting) { .map(String::trim); } + /** + * Resolve the value of this system setting, loading it from the System by checking: + *

    + *
  1. The system properties.
  2. + *
  3. The environment variables.
  4. + *
+ *

+ * This is similar to {@link #resolveSetting(SystemSetting)} but does not fall back to the default value if neither + * the environment variable or system property value are present. + */ + public static Optional resolveNonDefaultSetting(SystemSetting setting) { + return firstPresent(resolveProperty(setting), () -> resolveEnvironmentVariable(setting)) + .map(String::trim); + } + /** * Attempt to load this setting from the system properties. */ diff --git a/utils/src/test/java/software/amazon/awssdk/utils/SystemSettingTest.java b/utils/src/test/java/software/amazon/awssdk/utils/SystemSettingTest.java new file mode 100644 index 000000000000..0b3b413c0367 --- /dev/null +++ b/utils/src/test/java/software/amazon/awssdk/utils/SystemSettingTest.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.utils; + +import static org.assertj.core.api.Java6Assertions.assertThat; +import org.junit.Test; + +public class SystemSettingTest { + + @Test + public void getNonDefaultStringValue_doesNotReturnDefaultValue() { + TestSystemSetting setting = new TestSystemSetting("prop", "env", "default"); + + assertThat(setting.getNonDefaultStringValue().isPresent()).isFalse(); + } + + private static class TestSystemSetting implements SystemSetting { + private final String property; + private final String environmentVariable; + private final String defaultValue; + + public TestSystemSetting(String property, String environmentVariable, String defaultValue) { + this.property = property; + this.environmentVariable = environmentVariable; + this.defaultValue = defaultValue; + } + + @Override + public String property() { + return property; + } + + @Override + public String environmentVariable() { + return environmentVariable; + } + + @Override + public String defaultValue() { + return defaultValue; + } + } +} diff --git a/utils/src/test/java/software/amazon/awssdk/utils/internal/SystemSettingUtilsTest.java b/utils/src/test/java/software/amazon/awssdk/utils/internal/SystemSettingUtilsTest.java new file mode 100644 index 000000000000..435b3ee157aa --- /dev/null +++ b/utils/src/test/java/software/amazon/awssdk/utils/internal/SystemSettingUtilsTest.java @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.utils.internal; + +import static org.assertj.core.api.Java6Assertions.assertThat; +import org.junit.Test; +import software.amazon.awssdk.utils.SystemSetting; + +public class SystemSettingUtilsTest { + + @Test + public void resolveNonDefaultSetting_doesNotReturnDefaultValue() { + TestSystemSetting setting = new TestSystemSetting("prop", "env", "default"); + + assertThat(SystemSettingUtils.resolveNonDefaultSetting(setting).isPresent()).isFalse(); + } + + private static class TestSystemSetting implements SystemSetting { + private final String property; + private final String environmentVariable; + private final String defaultValue; + + public TestSystemSetting(String property, String environmentVariable, String defaultValue) { + this.property = property; + this.environmentVariable = environmentVariable; + this.defaultValue = defaultValue; + } + + @Override + public String property() { + return property; + } + + @Override + public String environmentVariable() { + return environmentVariable; + } + + @Override + public String defaultValue() { + return defaultValue; + } + } +}