diff --git a/.changes/2.14.0.json b/.changes/2.14.0.json
new file mode 100644
index 000000000000..2225d74e19eb
--- /dev/null
+++ b/.changes/2.14.0.json
@@ -0,0 +1,51 @@
+{
+ "version": "2.14.0",
+ "date": "2020-08-17",
+ "entries": [
+ {
+ "type": "feature",
+ "category": "Amazon EC2 Container Registry",
+ "description": "This feature adds support for pushing and pulling Open Container Initiative (OCI) artifacts."
+ },
+ {
+ "type": "feature",
+ "category": "AWS SDK for Java v2",
+ "description": "The client-side metrics feature is out of developer preview and is now generally available."
+ },
+ {
+ "type": "feature",
+ "category": "Amazon Kinesis",
+ "description": "Introducing ShardFilter for ListShards API to filter the shards using a position in the stream, and ChildShards support for GetRecords and SubscribeToShard API to discover children shards on shard end"
+ },
+ {
+ "type": "feature",
+ "category": "AWS RoboMaker",
+ "description": "This release introduces RoboMaker Simulation WorldForge, a capability that automatically generates one or more simulation worlds."
+ },
+ {
+ "type": "feature",
+ "category": "AWS Certificate Manager Private Certificate Authority",
+ "description": "ACM Private CA is launching cross-account support. This allows customers to share their private CAs with other accounts, AWS Organizations, and organizational units to issue end-entity certificates."
+ },
+ {
+ "type": "feature",
+ "category": "Elastic Load Balancing",
+ "description": "Adds support for HTTP Desync Mitigation in Application Load Balancers."
+ },
+ {
+ "type": "feature",
+ "category": "Elastic Load Balancing",
+ "description": "Adds support for HTTP Desync Mitigation in Classic Load Balancers."
+ },
+ {
+ "type": "feature",
+ "category": "AWS Certificate Manager",
+ "description": "ACM provides support for the new Private CA feature Cross-account CA sharing. ACM users can issue certificates signed by a private CA belonging to another account where the CA was shared with them."
+ },
+ {
+ "type": "feature",
+ "category": "Amazon QuickSight",
+ "description": "Amazon QuickSight now supports programmatic creation and management of analyses with new APIs."
+ }
+ ]
+}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b83edc94fc90..d0a2e5137001 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,37 @@
+# __2.14.0__ __2020-08-17__
+## __AWS Certificate Manager__
+ - ### Features
+ - ACM provides support for the new Private CA feature Cross-account CA sharing. ACM users can issue certificates signed by a private CA belonging to another account where the CA was shared with them.
+
+## __AWS Certificate Manager Private Certificate Authority__
+ - ### Features
+ - ACM Private CA is launching cross-account support. This allows customers to share their private CAs with other accounts, AWS Organizations, and organizational units to issue end-entity certificates.
+
+## __AWS RoboMaker__
+ - ### Features
+ - This release introduces RoboMaker Simulation WorldForge, a capability that automatically generates one or more simulation worlds.
+
+## __AWS SDK for Java v2__
+ - ### Features
+ - The client-side metrics feature is out of developer preview and is now generally available.
+
+## __Amazon EC2 Container Registry__
+ - ### Features
+ - This feature adds support for pushing and pulling Open Container Initiative (OCI) artifacts.
+
+## __Amazon Kinesis__
+ - ### Features
+ - Introducing ShardFilter for ListShards API to filter the shards using a position in the stream, and ChildShards support for GetRecords and SubscribeToShard API to discover children shards on shard end
+
+## __Amazon QuickSight__
+ - ### Features
+ - Amazon QuickSight now supports programmatic creation and management of analyses with new APIs.
+
+## __Elastic Load Balancing__
+ - ### Features
+ - Adds support for HTTP Desync Mitigation in Application Load Balancers.
+ - Adds support for HTTP Desync Mitigation in Classic Load Balancers.
+
# __2.13.76__ __2020-08-14__
## __AWS License Manager__
- ### Features
diff --git a/README.md b/README.md
index c5c532c75add..4954bdad3647 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ To automatically manage module versions (currently all modules have the same ver
* A metric can be tagged with multiple categories. Clients can enable/disable metric collection
* at a {@link MetricCategory} level.
- *
- * NOTE: This is a Preview API and is subject to change so it should not be used in production.
*/
-@SdkPreviewApi
@SdkPublicApi
public enum MetricCategory {
/**
diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java
index cfd5ba23bf0f..5eb4a031de98 100644
--- a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java
+++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java
@@ -19,15 +19,11 @@
import java.util.List;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
-import software.amazon.awssdk.annotations.SdkPreviewApi;
import software.amazon.awssdk.annotations.SdkPublicApi;
/**
* An immutable collection of metrics.
- *
- * NOTE: This is a Preview API and is subject to change so it should not be used in production.
*/
-@SdkPreviewApi
@SdkPublicApi
public interface MetricCollection extends Iterable
* The SDK may invoke methods on the interface from multiple threads
* concurrently so implementations must be threadsafe.
- *
- * NOTE: This is a Preview API and is subject to change so it should not be used in production.
*/
-@SdkPreviewApi
@ThreadSafe
@SdkPublicApi
public interface MetricPublisher extends SdkAutoCloseable {
diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java
index 44ee098da41d..2ec0cbcb5db2 100644
--- a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java
+++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java
@@ -15,15 +15,11 @@
package software.amazon.awssdk.metrics;
-import software.amazon.awssdk.annotations.SdkPreviewApi;
import software.amazon.awssdk.annotations.SdkPublicApi;
/**
* A container associating a metric and its value.
- *
- * NOTE: This is a Preview API and is subject to change so it should not be used in production.
*/
-@SdkPreviewApi
@SdkPublicApi
public interface MetricRecord Warning: Make sure the {@link #close()} this publisher when it is done being used to release all resources it
* consumes. Failure to do so will result in possible thread or file descriptor leaks.
- *
- * NOTE: This is a Preview API and is subject to change so it should not be used in production.
*/
-@SdkPreviewApi
@ThreadSafe
@Immutable
@SdkPublicApi
diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/package-info.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/package-info.java
deleted file mode 100644
index 4c7360a33f87..000000000000
--- a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://aws.amazon.com/apache2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-@SdkPreviewApi
-package software.amazon.awssdk.metrics.publishers.cloudwatch;
-
-import software.amazon.awssdk.annotations.SdkPreviewApi;
\ No newline at end of file
diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml
index 6f83194aeeb8..90929bf90b39 100644
--- a/metric-publishers/pom.xml
+++ b/metric-publishers/pom.xml
@@ -17,7 +17,7 @@
Creates a root or subordinate private certificate authority (CA). You must specify the CA configuration, the certificate revocation list (CRL) configuration, the CA type, and an optional idempotency token to avoid accidental creation of multiple CAs. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses, and X.500 subject information. The CRL configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this action returns the Amazon Resource Name (ARN) of the CA. Creates a root or subordinate private certificate authority (CA). You must specify the CA configuration, the certificate revocation list (CRL) configuration, the CA type, and an optional idempotency token to avoid accidental creation of multiple CAs. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses, and X.500 subject information. The CRL configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this action returns the Amazon Resource Name (ARN) of the CA. ACM Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your CRLs. Both PCA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Configure Access to ACM Private CA. Creates an audit report that lists every time that your CA private key is used. 
The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use the private key. Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use the private key. Both PCA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Configure Access to ACM Private CA. ACM Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your Audit Reports. Assigns permissions from a private CA to a designated AWS service. Services are specified by their service principals and can be given permission to create and retrieve certificates on a private CA. Services can also be given permission to list the active permissions that the private CA has granted. For ACM to automatically renew your private CA's certificates, you must assign all possible permissions from the CA to the ACM service principal. At this time, you can only assign permissions to ACM ( Grants one or more permissions on a private CA to the AWS Certificate Manager (ACM) service principal ( You can list current permissions with the ListPermissions action and revoke them with the DeletePermission action. About Permissions If the private CA and the certificates it issues reside in the same account, you can use For automatic certificate renewal to succeed, the ACM service principal needs permissions to create, retrieve, and list certificates. If the private CA and the ACM certificates reside in different accounts, then permissions cannot be used to enable automatic renewals. Instead, the ACM certificate owner must set up a resource-based policy to enable cross-account issuance and renewals. 
For more information, see Using a Resource Based Policy with ACM Private CA. Deletes a private certificate authority (CA). You must provide the Amazon Resource Name (ARN) of the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities action. Deleting a CA will invalidate other CAs and certificates below it in your CA hierarchy. Before you can delete a CA that you have created and activated, you must disable it. To do this, call the UpdateCertificateAuthority action and set the CertificateAuthorityStatus parameter to Additionally, you can delete a CA if you are waiting for it to be created (that is, the status of the CA is When you successfully call DeleteCertificateAuthority, the CA's status changes to Deletes a private certificate authority (CA). You must provide the Amazon Resource Name (ARN) of the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities action. Deleting a CA will invalidate other CAs and certificates below it in your CA hierarchy. Before you can delete a CA that you have created and activated, you must disable it. To do this, call the UpdateCertificateAuthority action and set the CertificateAuthorityStatus parameter to Additionally, you can delete a CA if you are waiting for it to be created (that is, the status of the CA is When you successfully call DeleteCertificateAuthority, the CA's status changes to Revokes permissions that a private CA assigned to a designated AWS service. Permissions can be created with the CreatePermission action and listed with the ListPermissions action. Revokes permissions on a private CA granted to the AWS Certificate Manager (ACM) service principal (acm.amazonaws.com). These permissions allow ACM to issue and renew ACM certificates that reside in the same AWS account as the CA. If you revoke these permissions, ACM will no longer renew the affected certificates automatically. 
Permissions can be granted with the CreatePermission action and listed with the ListPermissions action. About Permissions If the private CA and the certificates it issues reside in the same account, you can use For automatic certificate renewal to succeed, the ACM service principal needs permissions to create, retrieve, and list certificates. If the private CA and the ACM certificates reside in different accounts, then permissions cannot be used to enable automatic renewals. Instead, the ACM certificate owner must set up a resource-based policy to enable cross-account issuance and renewals. For more information, see Using a Resource Based Policy with ACM Private CA. Deletes the resource-based policy attached to a private CA. Deletion will remove any access that the policy has granted. If there is no policy attached to the private CA, this action will return successful. If you delete a policy that was applied through AWS Resource Access Manager (RAM), the CA will be removed from all shares in which it was included. The AWS Certificate Manager Service Linked Role that the policy supports is not affected when you delete the policy. The current policy can be shown with GetPolicy and updated with PutPolicy. About Policies A policy grants access on a private CA to an AWS customer account, to AWS Organizations, or to an AWS Organizations unit. Policies are under the control of a CA administrator. For more information, see Using a Resource Based Policy with ACM Private CA. A policy permits a user of AWS Certificate Manager (ACM) to issue ACM certificates signed by a CA in another account. For ACM to manage automatic renewal of these certificates, the ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM service to assume the identity of the user, subject to confirmation against the ACM Private CA policy. For more information, see Using a Service Linked Role with ACM. Updates made in AWS Resource Manager (RAM) are reflected in policies. 
For more information, see Using AWS Resource Access Manager (RAM) with ACM Private CA. Lists information about your private certificate authority (CA). You specify the private CA on input by its ARN (Amazon Resource Name). The output contains the status of your CA. This can be any of the following: Lists information about your private certificate authority (CA) or one that has been shared with you. You specify the private CA on input by its ARN (Amazon Resource Name). The output contains the status of your CA. This can be any of the following: Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport action. Audit information is created every time the certificate authority (CA) private key is used. The private key is used when you call the IssueCertificate action or the RevokeCertificate action. Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport action. Audit information is created every time the certificate authority (CA) private key is used. The private key is used when you call the IssueCertificate action or the RevokeCertificate action. Retrieves a certificate from your private CA. The ARN of the certificate is returned when you call the IssueCertificate action. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate action. You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport action to create a report that contains information about all of the certificates issued and revoked by your private CA. Retrieves a certificate from your private CA or one that has been shared with you. The ARN of the certificate is returned when you call the IssueCertificate action. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate action. 
You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport action to create a report that contains information about all of the certificates issued and revoked by your private CA. Retrieves the certificate and certificate chain for your private certificate authority (CA). Both the certificate and the chain are base64 PEM-encoded. The chain does not include the CA certificate. Each certificate in the chain signs the one before it. Retrieves the certificate and certificate chain for your private certificate authority (CA) or one that has been shared with you. Both the certificate and the chain are base64 PEM-encoded. The chain does not include the CA certificate. Each certificate in the chain signs the one before it. Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority action. Sign the CSR with your ACM Private CA-hosted or on-premises root or subordinate CA. Then import the signed certificate back into ACM Private CA by calling the ImportCertificateAuthorityCertificate action. The CSR is returned as a base64 PEM-encoded string. Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority action. Sign the CSR with your ACM Private CA-hosted or on-premises root or subordinate CA. Then import the signed certificate back into ACM Private CA by calling the ImportCertificateAuthorityCertificate action. The CSR is returned as a base64 PEM-encoded string. Retrieves the resource-based policy attached to a private CA. If either the private CA resource or the policy cannot be found, this action returns a The policy can be attached or updated with PutPolicy and removed with DeletePolicy. About Policies A policy grants access on a private CA to an AWS customer account, to AWS Organizations, or to an AWS Organizations unit. 
Policies are under the control of a CA administrator. For more information, see Using a Resource Based Policy with ACM Private CA. A policy permits a user of AWS Certificate Manager (ACM) to issue ACM certificates signed by a CA in another account. For ACM to manage automatic renewal of these certificates, the ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM service to assume the identity of the user, subject to confirmation against the ACM Private CA policy. For more information, see Using a Service Linked Role with ACM. Updates made in AWS Resource Manager (RAM) are reflected in policies. For more information, see Using AWS Resource Access Manager (RAM) with ACM Private CA. Imports a signed private CA certificate into ACM Private CA. This action is used when you are using a chain of trust whose root is located outside ACM Private CA. Before you can call this action, the following preparations must be in place: In ACM Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR). Sign the CSR using a root or intermediate CA hosted either by an on-premises PKI hierarchy or a commercial CA. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory. The following requirements apply when you import a CA certificate. You cannot import a non-self-signed certificate for use as a root CA. You cannot import a self-signed certificate for use as a subordinate CA. Your certificate chain must not include the private CA certificate that you are importing. Your ACM Private CA-hosted or on-premises CA certificate must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. 
The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built. The chain must be PEM-encoded. Imports a signed private CA certificate into ACM Private CA. This action is used when you are using a chain of trust whose root is located outside ACM Private CA. Before you can call this action, the following preparations must in place: In ACM Private CA, call the CreateCertificateAuthority action to create the private CA that that you plan to back with the imported certificate. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR). Sign the CSR using a root or intermediate CA hosted by either an on-premises PKI hierarchy or by a commercial CA. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory. The following requirements apply when you import a CA certificate. You cannot import a non-self-signed certificate for use as a root CA. You cannot import a self-signed certificate for use as a subordinate CA. Your certificate chain must not include the private CA certificate that you are importing. Your ACM Private CA-hosted or on-premises CA certificate must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints ACM Private CA allows the following extensions to be marked critical in the imported CA certificate or chain. 
Basic constraints (must be marked critical) Subject alternative names Key usage Extended key usage Authority key identifier Subject key identifier Issuer alternative name Subject directory attributes Subject information access Certificate policies Policy mappings Inhibit anyPolicy ACM Private CA rejects the following extensions when they are marked critical in an imported CA certificate or chain. Name constraints Policy constraints CRL distribution points Authority information access Freshest CRL Any other extension Uses your private certificate authority (CA) to issue a client certificate. This action returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate action and specifying the ARN. You cannot use the ACM ListCertificateAuthorities action to retrieve the ARNs of the certificates that you issue by using ACM Private CA. Uses your private certificate authority (CA), or one that has been shared with you, to issue a client certificate. This action returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate action and specifying the ARN. You cannot use the ACM ListCertificateAuthorities action to retrieve the ARNs of the certificates that you issue by using ACM Private CA. Lists the private certificate authorities that you created by using the CreateCertificateAuthority action. Lists the private certificate authorities that you created by using the CreateCertificateAuthority action. Lists all the permissions, if any, that have been assigned by a private CA. Permissions can be granted with the CreatePermission action and revoked with the DeletePermission action. List all permissions on a private CA, if any, granted to the AWS Certificate Manager (ACM) service principal (acm.amazonaws.com). These permissions allow ACM to issue and renew ACM certificates that reside in the same AWS account as the CA. 
Permissions can be granted with the CreatePermission action and revoked with the DeletePermission action. About Permissions If the private CA and the certificates it issues reside in the same account, you can use For automatic certificate renewal to succeed, the ACM service principal needs permissions to create, retrieve, and list certificates. If the private CA and the ACM certificates reside in different accounts, then permissions cannot be used to enable automatic renewals. Instead, the ACM certificate owner must set up a resource-based policy to enable cross-account issuance and renewals. For more information, see Using a Resource Based Policy with ACM Private CA. Lists the tags, if any, that are associated with your private CA. Tags are labels that you can use to identify and organize your CAs. Each tag consists of a key and an optional value. Call the TagCertificateAuthority action to add one or more tags to your CA. Call the UntagCertificateAuthority action to remove tags. Lists the tags, if any, that are associated with your private CA or one that has been shared with you. Tags are labels that you can use to identify and organize your CAs. Each tag consists of a key and an optional value. Call the TagCertificateAuthority action to add one or more tags to your CA. Call the UntagCertificateAuthority action to remove tags. Attaches a resource-based policy to a private CA. A policy can also be applied by sharing a private CA through AWS Resource Access Manager (RAM). The policy can be displayed with GetPolicy and removed with DeletePolicy. About Policies A policy grants access on a private CA to an AWS customer account, to AWS Organizations, or to an AWS Organizations unit. Policies are under the control of a CA administrator. For more information, see Using a Resource Based Policy with ACM Private CA. A policy permits a user of AWS Certificate Manager (ACM) to issue ACM certificates signed by a CA in another account. 
For ACM to manage automatic renewal of these certificates, the ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM service to assume the identity of the user, subject to confirmation against the ACM Private CA policy. For more information, see Using a Service Linked Role with ACM. Updates made in AWS Resource Manager (RAM) are reflected in policies. For more information, see Using AWS Resource Access Manager (RAM) with ACM Private CA. Restores a certificate authority (CA) that is in the Restores a certificate authority (CA) that is in the Revokes a certificate that was issued inside ACM Private CA. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. ACM Private CA writes the CRL to an S3 bucket that you specify. For more information about revocation, see the CrlConfiguration structure. ACM Private CA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport. You cannot revoke a root CA self-signed certificate. Revokes a certificate that was issued inside ACM Private CA. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. ACM Private CA writes the CRL to an S3 bucket that you specify. A CRL is typically updated approximately 30 minutes after a certificate is revoked. If for any reason the CRL update fails, ACM Private CA makes further attempts every 15 minutes. With Amazon CloudWatch, you can create alarms for the metrics Both PCA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Configure Access to ACM Private CA. 
ACM Private CA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport. You cannot revoke a root CA self-signed certificate. Adds one or more tags to your private CA. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the private CA on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair. You can apply a tag to just one private CA if you want to identify a specific characteristic of that CA, or you can apply the same tag to multiple private CAs if you want to filter for a common relationship among those CAs. To remove one or more tags, use the UntagCertificateAuthority action. Call the ListTags action to see what tags are associated with your CA. Adds one or more tags to your private CA. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the private CA on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair. You can apply a tag to just one private CA if you want to identify a specific characteristic of that CA, or you can apply the same tag to multiple private CAs if you want to filter for a common relationship among those CAs. To remove one or more tags, use the UntagCertificateAuthority action. Call the ListTags action to see what tags are associated with your CA. Remove one or more tags from your private CA. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this action, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value. To add tags to a private CA, use the TagCertificateAuthority. Call the ListTags action to see what tags are associated with your CA. Remove one or more tags from your private CA. 
A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this action, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value. To add tags to a private CA, use the TagCertificateAuthority. Call the ListTags action to see what tags are associated with your CA. Updates the status or configuration of a private certificate authority (CA). Your private CA must be in the Updates the status or configuration of a private certificate authority (CA). Your private CA must be in the Both PCA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Configure Access to ACM Private CA. A subdivision or unit of the organization (such as sales or finance) with which the certificate subject is affiliated. Disambiguating information for the certificate subject. Fully qualified domain name (FQDN) associated with the certificate subject. The certificate serial number. Contains information about the certificate subject. The certificate can be one issued by your private certificate authority (CA) or it can be your private CA certificate. The Subject field in the certificate identifies the entity that owns or controls the public key in the certificate. The entity can be a user, computer, device, or service. The Subject must contain an X.500 distinguished name (DN). A DN is a sequence of relative distinguished names (RDNs). The RDNs are separated by commas in the certificate. The DN must be unique for each entity, but your private CA can issue more than one certificate with the same DN to the same entity. Amazon Resource Name (ARN) for your private certificate authority (CA). The format is The AWS account ID that owns the certificate authority. Date and time at which your private CA was created. 
The period during which a deleted CA can be restored. For more information, see the The period during which a deleted CA can be restored. For more information, see the Contains information about your private certificate authority (CA). Your private CA can issue and revoke X.509 digital certificates. Digital certificates verify that the entity named in the certificate Subject field owns or controls the public key contained in the Subject Public Key Info field. Call the CreateCertificateAuthority action to create your private CA. You must then call the GetCertificateAuthorityCertificate action to retrieve a private CA certificate signing request (CSR). Sign the CSR with your ACM Private CA-hosted or on-premises root or subordinate CA certificate. Call the ImportCertificateAuthorityCertificate action to import the signed certificate into AWS Certificate Manager (ACM). Contains information about your private certificate authority (CA). Your private CA can issue and revoke X.509 digital certificates. Digital certificates verify that the entity named in the certificate Subject field owns or controls the public key contained in the Subject Public Key Info field. Call the CreateCertificateAuthority action to create your private CA. You must then call the GetCertificateAuthorityCertificate action to retrieve a private CA certificate signing request (CSR). Sign the CSR with your ACM Private CA-hosted or on-premises root or subordinate CA certificate. Call the ImportCertificateAuthorityCertificate action to import the signed certificate into AWS Certificate Manager (ACM). Name of the algorithm your private CA uses to sign certificate requests. Name of the algorithm your private CA uses to sign certificate requests. This parameter should not be confused with the Structure that contains X.500 distinguished name information for your private CA. Contains configuration information for your private certificate authority (CA). 
This includes information about the class of public key algorithm and the key pair that your private CA creates when it issues a certificate. It also includes the signature algorithm that it uses when issuing certificates, and its X.500 distinguished name. You must specify this information when you call the CreateCertificateAuthority action. Contains configuration information for your private certificate authority (CA). This includes information about the class of public key algorithm and the key pair that your private CA creates when it issues a certificate. It also includes the signature algorithm that it uses when issuing certificates, and its X.500 distinguished name. You must specify this information when you call the CreateCertificateAuthority action. The Amazon Resource Name (ARN) of the CA to be audited. This is of the form: The name of the S3 bucket that will contain the audit report. An alphanumeric string that contains a report identifier. The key that uniquely identifies the report file in your S3 bucket. Contains a Boolean value that you can use to enable a certification revocation list (CRL) for the CA, the name of the S3 bucket to which ACM Private CA will write the CRL, and an optional CNAME alias that you can use to hide the name of your bucket in the CRL Distribution Points extension of your CA certificate. For more information, see the CrlConfiguration structure. Contains a Boolean value that you can use to enable a certification revocation list (CRL) for the CA, the name of the S3 bucket to which ACM Private CA will write the CRL, and an optional CNAME alias that you can use to hide the name of your bucket in the CRL Distribution Points extension of your CA certificate. For more information, see the CrlConfiguration structure. Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. Idempotency tokens time out after five minutes. 
Therefore, if you call CreateCertificateAuthority multiple times with the same idempotency token within a five minute period, ACM Private CA recognizes that you are requesting only one certificate. As a result, ACM Private CA issues only one. If you change the idempotency token for each call, however, ACM Private CA recognizes that you are requesting multiple certificates. Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. For a given token, ACM Private CA creates exactly one CA. If you issue a subsequent call using the same token, ACM Private CA returns the ARN of the existing CA and takes no further action. If you change the idempotency token across multiple calls, ACM Private CA creates a unique CA for each unique token. Key-value pairs that will be attached to the new private CA. You can associate up to 50 tags with a private CA. For information using tags with IAM to manage permissions, see Controlling Access Using IAM Tags. Key-value pairs that will be attached to the new private CA. You can associate up to 50 tags with a private CA. For information using tags with IAM to manage permissions, see Controlling Access Using IAM Tags. The Amazon Resource Name (ARN) of the CA that grants the permissions. You can find the ARN by calling the ListCertificateAuthorities action. This must have the following form: The Amazon Resource Name (ARN) of the CA that grants the permissions. You can find the ARN by calling the ListCertificateAuthorities action. This must have the following form: Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. You can use this value to enable certificate revocation for a new CA when you call the CreateCertificateAuthority action or for an existing CA when you call the UpdateCertificateAuthority action. Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. 
You can use this value to enable certificate revocation for a new CA when you call the CreateCertificateAuthority action or for an existing CA when you call the UpdateCertificateAuthority action. Name of the S3 bucket that contains the CRL. If you do not provide a value for the CustomCname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You can change the name of your bucket by calling the UpdateCertificateAuthority action. You must specify a bucket policy that allows ACM Private CA to write the CRL to your bucket. Name of the S3 bucket that contains the CRL. If you do not provide a value for the CustomCname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You can change the name of your bucket by calling the UpdateCertificateAuthority action. You must specify a bucket policy that allows ACM Private CA to write the CRL to your bucket. Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed at 1/2 the age of next update or when a certificate is revoked. When a certificate is revoked, it is recorded in the next CRL that is generated and in the next audit report. Only time valid certificates are listed in the CRL. Expired certificates are not included. CRLs contain the following fields: Version: The current version number defined in RFC 5280 is V2. The integer value is 0x1. Signature Algorithm: The name of the algorithm used to sign the CRL. Issuer: The X.500 distinguished name of your private CA that issued the CRL. Last Update: The issue date and time of this CRL. 
Next Update: The day and time by which the next CRL will be issued. Revoked Certificates: List of revoked certificates. Each list item contains the following information. Serial Number: The serial number, in hexadecimal format, of the revoked certificate. Revocation Date: Date and time the certificate was revoked. CRL Entry Extensions: Optional extensions for the CRL entry. X509v3 CRL Reason Code: Reason the certificate was revoked. CRL Extensions: Optional extensions for the CRL. X509v3 Authority Key Identifier: Identifies the public key associated with the private key used to sign the certificate. X509v3 CRL Number: Decimal sequence number for the CRL. Signature Algorithm: Algorithm used by your private CA to sign the CRL. Signature Value: Signature computed over the CRL. Certificate revocation lists created by ACM Private CA are DER-encoded. You can use the following OpenSSL command to list a CRL. Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to ACM Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your CRLs. Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed at 1/2 the age of next update or when a certificate is revoked. When a certificate is revoked, it is recorded in the next CRL that is generated and in the next audit report. Only time valid certificates are listed in the CRL. Expired certificates are not included. CRLs contain the following fields: Version: The current version number defined in RFC 5280 is V2. The integer value is 0x1. Signature Algorithm: The name of the algorithm used to sign the CRL. Issuer: The X.500 distinguished name of your private CA that issued the CRL. 
Last Update: The issue date and time of this CRL. Next Update: The day and time by which the next CRL will be issued. Revoked Certificates: List of revoked certificates. Each list item contains the following information. Serial Number: The serial number, in hexadecimal format, of the revoked certificate. Revocation Date: Date and time the certificate was revoked. CRL Entry Extensions: Optional extensions for the CRL entry. X509v3 CRL Reason Code: Reason the certificate was revoked. CRL Extensions: Optional extensions for the CRL. X509v3 Authority Key Identifier: Identifies the public key associated with the private key used to sign the certificate. X509v3 CRL Number: Decimal sequence number for the CRL. Signature Algorithm: Algorithm used by your private CA to sign the CRL. Signature Value: Signature computed over the CRL. Certificate revocation lists created by ACM Private CA are DER-encoded. You can use the following OpenSSL command to list a CRL. The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must have the following form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must have the following form: The Amazon Resource Name (ARN) of the private CA that issued the permissions. You can find the CA's ARN by calling the ListCertificateAuthorities action. This must have the following form: The Amazon Resource Name (ARN) of the private CA that issued the permissions. You can find the CA's ARN by calling the ListCertificateAuthorities action. This must have the following form: The Amazon Resource Name (ARN) of the private CA that will have its policy deleted. You can find the CA's ARN by calling the ListCertificateAuthorities action. The ARN value must have the form The report ID returned by calling the CreateCertificateAuthorityAuditReport action. The report ID returned by calling the CreateCertificateAuthorityAuditReport action. 
Specifies whether report creation is in progress, has succeeded, or has failed. Name of the S3 bucket that contains the report. S3 key that uniquely identifies the report file in your S3 bucket. The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: A CertificateAuthority structure that contains information about your private CA. A CertificateAuthority structure that contains information about your private CA. The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The Amazon Resource Name (ARN) of the private CA that will have its policy retrieved. You can find the CA's ARN by calling the ListCertificateAuthorities action. The policy attached to the private CA as a JSON document. The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The token specified in the The token specified in the The S3 bucket policy is not valid. The policy must give ACM Private CA rights to read from and write to the bucket and find the bucket location. The resource policy is invalid or is missing a required statement. For general information about IAM policy and statement structure, see Overview of JSON Policies. 
The private CA is in a state during which a report or certificate cannot be generated. The state of the private CA does not allow this action to occur. The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The certificate signing request (CSR) for the certificate you want to issue. You can use the following OpenSSL command to create the CSR and a 2048 bit RSA private key. If you have a configuration file, you can use the following OpenSSL command. The The certificate signing request (CSR) for the certificate you want to issue. You can use the following OpenSSL command to create the CSR and a 2048 bit RSA private key. If you have a configuration file, you can use the following OpenSSL command. The Note: A CSR must provide either a subject name or a subject alternative name or the request will be rejected. The name of the algorithm that will be used to sign the certificate to be issued. The name of the algorithm that will be used to sign the certificate to be issued. This parameter should not be confused with the Specifies a custom configuration template to use when issuing a certificate. If this parameter is not provided, ACM Private CA defaults to the The following service-owned arn:aws:acm-pca:::template/EndEntityCertificate/V1 arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen0/V1 arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen1/V1 arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen2/V1 arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen3/V1 arn:aws:acm-pca:::template/RootCACertificate/V1 For more information, see Using Templates. Specifies a custom configuration template to use when issuing a certificate. 
If this parameter is not provided, ACM Private CA defaults to the Note: The CA depth configured on a subordinate CA certificate must not exceed the limit set by its parents in the CA hierarchy. The following service-owned arn:aws:acm-pca:::template/CodeSigningCertificate/V1 arn:aws:acm-pca:::template/CodeSigningCertificate_CSRPassthrough/V1 arn:aws:acm-pca:::template/EndEntityCertificate/V1 arn:aws:acm-pca:::template/EndEntityCertificate_CSRPassthrough/V1 arn:aws:acm-pca:::template/EndEntityClientAuthCertificate/V1 arn:aws:acm-pca:::template/EndEntityClientAuthCertificate_CSRPassthrough/V1 arn:aws:acm-pca:::template/EndEntityServerAuthCertificate/V1 arn:aws:acm-pca:::template/EndEntityServerAuthCertificate_CSRPassthrough/V1 arn:aws:acm-pca:::template/OCSPSigningCertificate/V1 arn:aws:acm-pca:::template/OCSPSigningCertificate_CSRPassthrough/V1 arn:aws:acm-pca:::template/RootCACertificate/V1 arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen0/V1 arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen1/V1 arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen2/V1 arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen3/V1 For more information, see Using Templates. The type of the validity period. Information describing the validity period of the certificate. When issuing a certificate, ACM Private CA sets the \"Not Before\" date in the validity field to date and time minus 60 minutes. This is intended to compensate for time inconsistencies across systems of 60 minutes or less. The validity period configured on a certificate must not exceed the limit set by its parents in the CA hierarchy. An ACM Private CA limit has been exceeded. See the exception message returned to determine the limit that was exceeded. An ACM Private CA quota has been exceeded. See the exception message returned to determine the quota that was exceeded. Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. 
If additional items exist beyond the number you specify, the Use this parameter to filter the returned set of certificate authorities based on their owner. The default is SELF. The Amazon Resource Name (ARN) of the private CA to inspect. You can find the ARN by calling the ListCertificateAuthorities action. This must be of the form: The Amazon Resource Name (ARN) of the private CA to inspect. You can find the ARN by calling the ListCertificateAuthorities action. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form: The current action was prevented because it would lock the caller out from performing subsequent actions. Verify that the specified parameters would not result in the caller being denied access to the resource. The time at which the permission was created. The AWS service or entity that holds the permission. At this time, the only valid principal is The ID of the account that assigned the permission. The private CA actions that can be performed by the designated AWS service. The name of the policy that is associated with the permission. Permissions designate which private CA actions can be performed by an AWS service or entity. In order for ACM to automatically renew private certificates, you must give the ACM service principal all available permissions ( Permissions designate which private CA actions can be performed by an AWS service or entity. In order for ACM to automatically renew private certificates, you must give the ACM service principal all available permissions ( The Amazon Resource Name (ARN) of the private CA to associate with the policy. The ARN of the CA can be found by calling the ListCertificateAuthorities action. 
The path and filename of a JSON-formatted IAM policy to attach to the specified private CA resource. If this policy does not contain all required statements or if it includes any statement that is not allowed, the A resource such as a private CA, S3 bucket, certificate, or audit report cannot be found. A resource such as a private CA, S3 bucket, certificate, audit report, or policy cannot be found. The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority action. This must be of the form: Configuration of the certificate revocation list (CRL), if any, maintained by your private CA. Certificate revocation information used by the CreateCertificateAuthority and UpdateCertificateAuthority actions. Your private certificate authority (CA) can create and maintain a certificate revocation list (CRL). A CRL contains information about certificates revoked by your CA. For more information, see RevokeCertificate. Certificate revocation information used by the CreateCertificateAuthority and UpdateCertificateAuthority actions. Your private certificate authority (CA) can create and maintain a certificate revocation list (CRL). A CRL contains information about certificates revoked by your CA. For more information, see RevokeCertificate. Serial number of the certificate to be revoked. This must be in hexadecimal format. You can retrieve the serial number by calling GetCertificate with the Amazon Resource Name (ARN) of the certificate you want and the ARN of your private CA. The GetCertificate action retrieves the certificate in the PEM format. You can use the following OpenSSL command to list the certificate in text format and copy the hexadecimal serial number. You can also copy the serial number from the console or use the DescribeCertificate action in the AWS Certificate Manager API Reference. 
Serial number of the certificate to be revoked. This must be in hexadecimal format. You can retrieve the serial number by calling GetCertificate with the Amazon Resource Name (ARN) of the certificate you want and the ARN of your private CA. The GetCertificate action retrieves the certificate in the PEM format. You can use the following OpenSSL command to list the certificate in text format and copy the hexadecimal serial number. You can also copy the serial number from the console or use the DescribeCertificate action in the AWS Certificate Manager API Reference. Value of the tag. Tags are labels that you can use to identify and organize your private CAs. Each tag consists of a key and an optional value. You can associate up to 50 tags with a private CA. To add one or more tags to a private CA, call the TagCertificateAuthority action. To remove a tag, call the UntagCertificateAuthority action. Tags are labels that you can use to identify and organize your private CAs. Each tag consists of a key and an optional value. You can associate up to 50 tags with a private CA. To add one or more tags to a private CA, call the TagCertificateAuthority action. To remove a tag, call the UntagCertificateAuthority action. The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form: Time period. 
A long integer interpreted according to the value of Specifies whether the Determines how ACM Private CA interprets the Sample input value: 491231235959 (UTCTime format) Output expiration date/time: 12/31/2049 23:59:59 Sample input value: 2524608000 Output expiration date/time: 01/01/2050 00:00:00 Example if Sample input value: 90 Output expiration date: 01/10/2020 12:34:54 UTC Length of time for which the certificate issued by your private certificate authority (CA), or by the private CA itself, is valid in days, months, or years. You can issue a certificate by calling the IssueCertificate action. Validity specifies the period of time during which a certificate is valid. Validity can be expressed as an explicit date and time when the certificate expires, or as a span of time after issuance, stated in days, months, or years. For more information, see Validity in RFC 5280. You can issue a certificate by calling the IssueCertificate action. This is the ACM Private CA API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing private certificate authorities (CA) for your organization. The documentation for each action shows the Query API request parameters and the XML response. Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs. Each ACM Private CA API action has a throttling limit which determines the number of times the action can be called per second. For more information, see API Rate Limits in ACM Private CA in the ACM Private CA user guide. This is the ACM Private CA API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing private certificate authorities (CA) for your organization. The documentation for each action shows the Query API request parameters and the XML response. 
Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs. Each ACM Private CA API action has a quota that determines the number of times the action can be called per second. For more information, see API Rate Quotas in ACM Private CA in the ACM Private CA user guide. The media type associated with the image manifest. The manifest media type of the image. An object representing an Amazon ECR image. A summary of the last completed image scan. The media type of the image manifest. The artifact media type of the image. An object that describes an image returned by a DescribeImages operation. Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags. Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, For more information, see Tag Your Classic Load Balancer in the Classic Load Balancers Guide. Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags. Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, For more information, see Tag Your Classic Load Balancer in the Classic Load Balancers Guide. Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups. For more information, see Security Groups for Load Balancers in a VPC in the Classic Load Balancers Guide. Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups. For more information, see Security Groups for Load Balancers in a VPC in the Classic Load Balancers Guide. 
Adds one or more subnets to the set of configured subnets for the specified load balancer. The load balancer evenly distributes requests across all registered subnets. For more information, see Add or Remove Subnets for Your Load Balancer in a VPC in the Classic Load Balancers Guide. Adds one or more subnets to the set of configured subnets for the specified load balancer. The load balancer evenly distributes requests across all registered subnets. For more information, see Add or Remove Subnets for Your Load Balancer in a VPC in the Classic Load Balancers Guide. Specifies the health check settings to use when evaluating the health state of your EC2 instances. For more information, see Configure Health Checks for Your Load Balancer in the Classic Load Balancers Guide. Specifies the health check settings to use when evaluating the health state of your EC2 instances. For more information, see Configure Health Checks for Your Load Balancer in the Classic Load Balancers Guide. Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can be associated only with HTTP/HTTPS listeners. This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie, If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued. For more information, see Application-Controlled Session Stickiness in the Classic Load Balancers Guide. Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can be associated only with HTTP/HTTPS listeners. 
This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie, If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued. For more information, see Application-Controlled Session Stickiness in the Classic Load Balancers Guide. Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners. When a load balancer implements this policy, the load balancer uses a special cookie to track the instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. If not, the load balancer sends the request to a server that is chosen based on the existing load-balancing algorithm. A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration. For more information, see Duration-Based Session Stickiness in the Classic Load Balancers Guide. Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners. When a load balancer implements this policy, the load balancer uses a special cookie to track the instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. 
If not, the load balancer sends the request to a server that is chosen based on the existing load-balancing algorithm. A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration. For more information, see Duration-Based Session Stickiness in the Classic Load Balancers Guide. Creates a Classic Load Balancer. You can add listeners, security groups, subnets, and tags when you create your load balancer, or you can add them later using CreateLoadBalancerListeners, ApplySecurityGroupsToLoadBalancer, AttachLoadBalancerToSubnets, and AddTags. To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer. You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancers Guide. Creates a Classic Load Balancer. You can add listeners, security groups, subnets, and tags when you create your load balancer, or you can add them later using CreateLoadBalancerListeners, ApplySecurityGroupsToLoadBalancer, AttachLoadBalancerToSubnets, and AddTags. To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer. You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancers Guide. Creates one or more listeners for the specified load balancer. 
If a listener with the specified port does not already exist, it is created; otherwise, the properties of the new listener must match the properties of the existing listener. For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide. Creates one or more listeners for the specified load balancer. If a listener with the specified port does not already exist, it is created; otherwise, the properties of the new listener must match the properties of the existing listener. For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide. Deregisters the specified instances from the specified load balancer. After the instance is deregistered, it no longer receives traffic from the load balancer. You can use DescribeLoadBalancers to verify that the instance is deregistered from the load balancer. For more information, see Register or De-Register EC2 Instances in the Classic Load Balancers Guide. Deregisters the specified instances from the specified load balancer. After the instance is deregistered, it no longer receives traffic from the load balancer. You can use DescribeLoadBalancers to verify that the instance is deregistered from the load balancer. For more information, see Register or De-Register EC2 Instances in the Classic Load Balancers Guide. Describes the current Elastic Load Balancing resource limits for your AWS account. For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancers Guide. Describes the current Elastic Load Balancing resource limits for your AWS account. For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancers Guide. Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC. For load balancers in a non-default VPC, use DetachLoadBalancerFromSubnets. 
There must be at least one Availability Zone registered with a load balancer at all times. After an Availability Zone is removed, all instances registered with the load balancer that are in the removed Availability Zone go into the For more information, see Add or Remove Availability Zones in the Classic Load Balancers Guide. Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC. For load balancers in a non-default VPC, use DetachLoadBalancerFromSubnets. There must be at least one Availability Zone registered with a load balancer at all times. After an Availability Zone is removed, all instances registered with the load balancer that are in the removed Availability Zone go into the For more information, see Add or Remove Availability Zones in the Classic Load Balancers Guide. Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC. For load balancers in a non-default VPC, use AttachLoadBalancerToSubnets. The load balancer evenly distributes requests across all its registered Availability Zones that contain instances. For more information, see Add or Remove Availability Zones in the Classic Load Balancers Guide. Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC. For load balancers in a non-default VPC, use AttachLoadBalancerToSubnets. The load balancer evenly distributes requests across all its registered Availability Zones that contain instances. For more information, see Add or Remove Availability Zones in the Classic Load Balancers Guide. Modifies the attributes of the specified load balancer. You can modify the load balancer attributes, such as For more information, see the following in the Classic Load Balancers Guide: Modifies the attributes of the specified load balancer. 
You can modify the load balancer attributes, such as For more information, see the following in the Classic Load Balancers Guide: Adds the specified instances to the specified load balancer. The instance must be a running instance in the same network as the load balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that VPC and then register the linked EC2-Classic instances with the load balancer in the VPC. Note that After the instance is registered, it starts receiving traffic and requests from the load balancer. Any instance that is not in one of the Availability Zones registered for the load balancer is moved to the To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer. For more information, see Register or De-Register EC2 Instances in the Classic Load Balancers Guide. Adds the specified instances to the specified load balancer. The instance must be a running instance in the same network as the load balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that VPC and then register the linked EC2-Classic instances with the load balancer in the VPC. Note that After the instance is registered, it starts receiving traffic and requests from the load balancer. Any instance that is not in one of the Availability Zones registered for the load balancer is moved to the To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer. For more information, see Register or De-Register EC2 Instances in the Classic Load Balancers Guide. Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same load balancer and port. 
For more information about updating your SSL certificate, see Replace the SSL Certificate for Your Load Balancer in the Classic Load Balancers Guide. Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same load balancer and port. For more information about updating your SSL certificate, see Replace the SSL Certificate for Your Load Balancer in the Classic Load Balancers Guide. Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new set of policies. At this time, only the back-end server authentication policy type can be applied to the instance ports; this policy type is composed of multiple public key policies. Each time you use You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify that the policy is associated with the EC2 instance. For more information about enabling back-end instance authentication, see Configure Back-end Instance Authentication in the Classic Load Balancers Guide. For more information about Proxy Protocol, see Configure Proxy Protocol Support in the Classic Load Balancers Guide. Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new set of policies. At this time, only the back-end server authentication policy type can be applied to the instance ports; this policy type is composed of multiple public key policies. Each time you use You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify that the policy is associated with the EC2 instance. For more information about enabling back-end instance authentication, see Configure Back-end Instance Authentication in the Classic Load Balancers Guide. For more information about Proxy Protocol, see Configure Proxy Protocol Support in the Classic Load Balancers Guide. 
Replaces the current set of policies for the specified load balancer port with the specified set of policies. To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer. For more information about setting policies, see Update the SSL Negotiation Configuration, Duration-Based Session Stickiness, and Application-Controlled Session Stickiness in the Classic Load Balancers Guide. Replaces the current set of policies for the specified load balancer port with the specified set of policies. To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer. For more information about setting policies, see Update the SSL Negotiation Configuration, Duration-Based Session Stickiness, and Application-Controlled Session Stickiness in the Classic Load Balancers Guide. This parameter is reserved. The name of the attribute. The following attribute is supported. This parameter is reserved. The value of the attribute. This data type is reserved. Information about additional load balancer attributes. The listeners. For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide. The listeners. For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide. The type of a load balancer. Valid only for load balancers in a VPC. By default, Elastic Load Balancing creates an Internet-facing load balancer with a DNS name that resolves to public IP addresses. For more information about Internet-facing and Internal load balancers, see Load Balancer Scheme in the Elastic Load Balancing User Guide. Specify The type of a load balancer. Valid only for load balancers in a VPC. By default, Elastic Load Balancing creates an Internet-facing load balancer with a DNS name that resolves to public IP addresses. For more information about Internet-facing and Internal load balancers, see Load Balancer Scheme in the Elastic Load Balancing User Guide. 
Specify A list of tags to assign to the load balancer. For more information about tagging your load balancer, see Tag Your Classic Load Balancer in the Classic Load Balancers Guide. A list of tags to assign to the load balancer. For more information about tagging your load balancer, see Tag Your Classic Load Balancer in the Classic Load Balancers Guide. Contains the parameters for CreateLoadBalancer. The protocol to use for routing traffic to instances: HTTP, HTTPS, TCP, or SSL. If the front-end protocol is HTTP, HTTPS, TCP, or SSL, If there is another listener with the same If there is another listener with the same The protocol to use for routing traffic to instances: HTTP, HTTPS, TCP, or SSL. If the front-end protocol is TCP or SSL, the back-end protocol must be TCP or SSL. If the front-end protocol is HTTP or HTTPS, the back-end protocol must be HTTP or HTTPS. If there is another listener with the same If there is another listener with the same The Amazon Resource Name (ARN) of the server certificate. Information about a listener. For information about the protocols and the ports supported by Elastic Load Balancing, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide. Information about a listener. For information about the protocols and the ports supported by Elastic Load Balancing, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide. If enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones. For more information, see Configure Cross-Zone Load Balancing in the Classic Load Balancers Guide. If enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones. For more information, see Configure Cross-Zone Load Balancing in the Classic Load Balancers Guide. 
If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify. For more information, see Enable Access Logs in the Classic Load Balancers Guide. If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify. For more information, see Enable Access Logs in the Classic Load Balancers Guide. If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance. For more information, see Configure Connection Draining in the Classic Load Balancers Guide. If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance. For more information, see Configure Connection Draining in the Classic Load Balancers Guide. If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration. By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Classic Load Balancers Guide. If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration. By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Classic Load Balancers Guide. This parameter is reserved. Any additional attributes. The attributes for a load balancer. The DNS name of the load balancer. For more information, see Configure a Custom Domain Name in the Classic Load Balancers Guide. The DNS name of the load balancer. 
For more information, see Configure a Custom Domain Name in the Classic Load Balancers Guide. A load balancer can distribute incoming traffic across your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered instances and ensures that it routes traffic only to healthy instances. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer and a protocol and port number for connections from the load balancer to the instances. Elastic Load Balancing supports three types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers. You can select a load balancer based on your application needs. For more information, see the Elastic Load Balancing User Guide. This reference covers the 2012-06-01 API, which supports Classic Load Balancers. The 2015-12-01 API supports Application Load Balancers and Network Load Balancers. To get started, create a load balancer with one or more listeners using CreateLoadBalancer. Register your instances with the load balancer using RegisterInstancesWithLoadBalancer. All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds with a 200 OK response code. A load balancer can distribute incoming traffic across your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered instances and ensures that it routes traffic only to healthy instances. 
You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer and a protocol and port number for connections from the load balancer to the instances. Elastic Load Balancing supports three types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers. You can select a load balancer based on your application needs. For more information, see the Elastic Load Balancing User Guide. This reference covers the 2012-06-01 API, which supports Classic Load Balancers. The 2015-12-01 API supports Application Load Balancers and Network Load Balancers. To get started, create a load balancer with one or more listeners using CreateLoadBalancer. Register your instances with the load balancer using RegisterInstancesWithLoadBalancer. All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds with a 200 OK response code. Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer. Rules are evaluated in priority order, from the lowest value to the highest value. When the conditions for a rule are met, its actions are performed. If the conditions for no rules are met, the actions for the default rule are performed. For more information, see Listener Rules in the Application Load Balancers Guide. To view your current rules, use DescribeRules. To update a rule, use ModifyRule. To set the priorities of your rules, use SetRulePriorities. To delete a rule, use DeleteRule. Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer. Each rule consists of a priority, one or more actions, and one or more conditions. Rules are evaluated in priority order, from the lowest value to the highest value. 
When the conditions for a rule are met, its actions are performed. If the conditions for no rules are met, the actions for the default rule are performed. For more information, see Listener Rules in the Application Load Balancers Guide. To view your current rules, use DescribeRules. To update a rule, use ModifyRule. To set the priorities of your rules, use SetRulePriorities. To delete a rule, use DeleteRule. Deletes the specified rule. Deletes the specified rule. You can't delete the default rule. The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The last action to be performed must be one of the following types of actions: a The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. Information for creating an action that distributes requests among one or more target groups. For Network Load Balancers, you can specify a single target group. Specify only when Information about an action. Information about an action. Each rule must include exactly one of the following types of actions: The conditions. Each rule can include zero or one of the following conditions: The conditions. Each rule can optionally include up to one of each of the following conditions: The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type. The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type. The name of the attribute. The following attributes are supported by both Application Load Balancers and Network Load Balancers: The following attributes are supported by only Application Load Balancers: The following attributes are supported by only Network Load Balancers: The name of the attribute. 
The following attributes are supported by both Application Load Balancers and Network Load Balancers: The following attributes are supported by only Application Load Balancers: The following attributes are supported by only Network Load Balancers: [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful response from a target. With Network Load Balancers, you can't modify this setting. [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful response from a target. The possible values are from 200 to 499. You can specify multiple values (for example, \"200,202\") or a range of values (for example, \"200-299\"). The default is 200. With Network Load Balancers, you can't modify this setting. The condition value. You can use If A-Z, a-z, 0-9 - . * (matches 0 or more characters) ? (matches exactly 1 character) If A-Z, a-z, 0-9 _ - . $ / ~ \" ' @ : + & (using &) * (matches 0 or more characters) ? (matches exactly 1 character) The condition value. Specify only when If A-Z, a-z, 0-9 - . * (matches 0 or more characters) ? (matches exactly 1 character) If A-Z, a-z, 0-9 _ - . $ / ~ \" ' @ : + & (using &) * (matches 0 or more characters) ? (matches exactly 1 character) Information for a source IP condition. Specify only when Information about a condition for a rule. Information about a condition for a rule. Each rule can optionally include up to one of each of the following conditions: The name of the attribute. The following attributes are supported by both Application Load Balancers and Network Load Balancers: The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address: The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function: The following attribute is supported only by Network Load Balancers: The name of the attribute. 
The following attributes are supported by both Application Load Balancers and Network Load Balancers: The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address: The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function: The following attribute is supported only by Network Load Balancers: Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream. You specify and control the number of shards that a stream is composed of. Each shard can support reads up to five transactions per second, up to a maximum data read total of 2 MB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. If the amount of data input increases or decreases, you can add or remove shards. The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by AWS Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name. You receive a Have more than five streams in the Create more shards than are authorized for your account. For the default shard limit for an AWS account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact AWS Support. You can use CreateStream has a limit of five transactions per second per account. Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. 
Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream. You specify and control the number of shards that a stream is composed of. Each shard can support reads up to five transactions per second, up to a maximum data read total of 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second. If the amount of data input increases or decreases, you can add or remove shards. The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by AWS Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name. You receive a Have more than five streams in the Create more shards than are authorized for your account. For the default shard limit for an AWS account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact AWS Support. You can use CreateStream has a limit of five transactions per second per account. To deregister a consumer, provide its ARN. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to deregister, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. The description of a consumer contains its name and ARN. This operation has a limit of five transactions per second per account. To deregister a consumer, provide its ARN. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. 
You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to deregister, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. The description of a consumer contains its name and ARN. This operation has a limit of five transactions per second per stream. Describes the specified Kinesis data stream. The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there are the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream. You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Data Streams Developer Guide. There are no guarantees about the chronological order in which shards are returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard. This operation has a limit of 10 transactions per second per account. Describes the specified Kinesis data stream. The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there are the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream. 
You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Data Streams Developer Guide. There are no guarantees about the chronological order in which shards are returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard. This operation has a limit of 10 transactions per second per account. To get the description of a registered consumer, provide the ARN of the consumer. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to describe, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. This operation has a limit of 20 transactions per second per account. To get the description of a registered consumer, provide the ARN of the consumer. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to describe, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. This operation has a limit of 20 transactions per second per stream. Provides a summarized description of the specified Kinesis data stream without the shard list. The information returned includes the stream name, Amazon Resource Name (ARN), status, record retention period, approximate creation time, monitoring, encryption details, and open shard count. Provides a summarized description of the specified Kinesis data stream without the shard list. 
The information returned includes the stream name, Amazon Resource Name (ARN), status, record retention period, approximate creation time, monitoring, encryption details, and open shard count. DescribeStreamSummary has a limit of 20 transactions per second per account. Gets data records from a Kinesis data stream's shard. Specify a shard iterator using the You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MiB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw To detect whether the application is falling behind in processing, you can use the Each Amazon Kinesis record includes a value, This operation has a limit of five transactions per second per account. Gets data records from a Kinesis data stream's shard. Specify a shard iterator using the You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. 
Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MiB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw To detect whether the application is falling behind in processing, you can use the Each Amazon Kinesis record includes a value, This operation has a limit of five transactions per second per shard. Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is returned to the requester. A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards. You must specify the shard iterator type. For example, you can set the When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in If a GetShardIterator request is made too often, you receive a If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. A shard can be closed as a result of using SplitShard or MergeShards. GetShardIterator has a limit of five transactions per second per account per open shard. Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is returned to the requester. 
A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards. You must specify the shard iterator type. For example, you can set the When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in If a GetShardIterator request is made too often, you receive a If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. A shard can be closed as a result of using SplitShard or MergeShards. GetShardIterator has a limit of five transactions per second per account per open shard. Lists the consumers registered to receive data from a stream using enhanced fan-out, and provides information about each consumer. This operation has a limit of 10 transactions per second per account. Lists the consumers registered to receive data from a stream using enhanced fan-out, and provides information about each consumer. This operation has a limit of 5 transactions per second per stream. Merges two adjacent shards in a Kinesis data stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards form a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. 
After the merge, the single child shard receives data for all hash key values covered by the two parent shards. If the stream is in the You can use DescribeStream to check the state of the stream, which is returned in You use DescribeStream to determine the shard IDs that are specified in the If you try to operate on too many streams in parallel using CreateStream, DeleteStream, Merges two adjacent shards in a Kinesis data stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards form a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards. If the stream is in the You can use DescribeStream to check the state of the stream, which is returned in You use DescribeStream to determine the shard IDs that are specified in the If you try to operate on too many streams in parallel using CreateStream, DeleteStream, Writes a single data record into an Amazon Kinesis data stream. Call You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself. The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. The partition key is used by Kinesis Data Streams to distribute data across shards. Kinesis Data Streams segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine the shard to which a given data record belongs. 
Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the If a By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period. Writes a single data record into an Amazon Kinesis data stream. Call You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself. The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. The partition key is used by Kinesis Data Streams to distribute data across shards. Kinesis Data Streams segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine the shard to which a given data record belongs. Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. 
To guarantee strictly increasing ordering, write serially to a shard and use the After you write a record to a stream, you cannot modify that record or its order within the stream. If a By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period. Writes multiple data records into a Kinesis data stream in a single call (also referred to as a Each You must specify the name of the stream that captures, stores, and transports the data; and an array of request The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. The partition key is used by Kinesis Data Streams as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide. Each record in the The The response A successfully processed record includes An unsuccessfully processed record includes By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period. Writes multiple data records into a Kinesis data stream in a single call (also referred to as a Each You must specify the name of the stream that captures, stores, and transports the data; and an array of request The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. 
The partition key is used by Kinesis Data Streams as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide. Each record in the The The response A successfully processed record includes An unsuccessfully processed record includes After you write a record to a stream, you cannot modify that record or its order within the stream. By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period. Registers a consumer with a Kinesis data stream. When you use this operation, the consumer you register can read data from the stream at a rate of up to 2 MiB per second. This rate is unaffected by the total number of consumers that read from the same stream. You can register up to 5 consumers per stream. A given consumer can only be registered with one stream. This operation has a limit of five transactions per second per account. Registers a consumer with a Kinesis data stream. When you use this operation, the consumer you register can then call SubscribeToShard to receive data from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every shard you subscribe to. This rate is unaffected by the total number of consumers that read from the same stream. You can register up to 20 consumers per stream. A given consumer can only be registered with one stream at a time. For an example of how to use this operation, see Enhanced Fan-Out Using the Kinesis Data Streams API. 
The use of this operation has a limit of five transactions per second per account. Also, only 5 consumers can be created simultaneously. In other words, you cannot have more than 5 consumers in a Splits a shard into two new shards in the Kinesis data stream, to increase the stream's capacity to ingest and transport data. You can also use You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information, see Split a Shard in the Amazon Kinesis Data Streams Developer Guide. You can use DescribeStream to determine the shard ID and hash key values for the You can use If the specified stream does not exist, For the default shard limit for an AWS account, see Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact AWS Support. If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a Splits a shard into two new shards in the Kinesis data stream, to increase the stream's capacity to ingest and transport data. You can also use You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information, see Split a Shard in the Amazon Kinesis Data Streams Developer Guide. You can use DescribeStream to determine the shard ID and hash key values for the You can use If the specified stream does not exist, For the default shard limit for an AWS account, see Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. 
To increase this limit, contact AWS Support. If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a Call this operation from your consumer after you call RegisterStreamConsumer to register the consumer with Kinesis Data Streams. If the call succeeds, your consumer starts receiving events of type SubscribeToShardEvent for up to 5 minutes, after which time you need to call You can make one call to This operation establishes an HTTP/2 connection between the consumer you specify in the When the You can make one call to If you call For an example of how to use this operation, see Enhanced Fan-Out Using the Kinesis Data Streams API. Updates the shard count of the specified stream to the specified number of shards. Updating the shard count is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to To update the shard count, Kinesis Data Streams performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges. This operation has the following default limits. By default, you cannot do the following: Scale more than twice per rolling 24-hour period per stream Scale up to more than double your current shard count for a stream Scale down below half your current shard count for a stream Scale up to more than 500 shards in a stream Scale a stream with more than 500 shards down unless the result is less than 500 shards Scale up to more than the shard limit for your account For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To request an increase in the call rate limit, the shard limit for this API, or your overall shard limit, use the limits form. 
Updates the shard count of the specified stream to the specified number of shards. Updating the shard count is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to To update the shard count, Kinesis Data Streams performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. These short-lived shards count towards your total shard limit for your account in the Region. When using this operation, we recommend that you specify a target shard count that is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling action might take longer to complete. This operation has the following default limits. By default, you cannot do the following: Scale more than ten times per rolling 24-hour period per stream Scale up to more than double your current shard count for a stream Scale down below half your current shard count for a stream Scale up to more than 500 shards in a stream Scale a stream with more than 500 shards down unless the result is less than 500 shards Scale up to more than the shard limit for your account For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To request an increase in the call rate limit, the shard limit for this API, or your overall shard limit, use the limits form. Represents the input for An object that represents the details of the consumer you registered. An object that represents the details of the consumer you registered. This type of object is returned by RegisterStreamConsumer. The ARN of the stream with which you registered the consumer. An object that represents the details of a registered consumer. An object that represents the details of a registered consumer. This type of object is returned by DescribeStreamConsumer. 
The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput. DefaultShardLimit; The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput. Represents the input for List of shard-level metrics to disable. The following are the valid shard-level metrics. The value \" For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide. List of shard-level metrics to disable. The following are the valid shard-level metrics. The value \" For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide. Represents the input for DisableEnhancedMonitoring. List of shard-level metrics to enable. The following are the valid shard-level metrics. The value \" For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide. List of shard-level metrics to enable. The following are the valid shard-level metrics. The value \" For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide. Represents the input for EnableEnhancedMonitoring. List of shard-level metrics. The following are the valid shard-level metrics. The value \" For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide. List of shard-level metrics. The following are the valid shard-level metrics. The value \" For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide. 
Represents enhanced metrics types. The maximum number of records to return. Specify a value of up to 10,000. If you specify a value that is greater than 10,000, GetRecords throws The maximum number of records to return. Specify a value of up to 10,000. If you specify a value that is greater than 10,000, GetRecords throws Represents the input for GetRecords. The number of milliseconds the GetRecords response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment. Represents the output for GetRecords. The processing of the request failed because of an unknown error, exception, or failure. A message that provides information about the error. The request was rejected because the state of the specified resource isn't valid for this request. For more information, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide. The request was rejected because the state of the specified resource isn't valid for this request. For more information, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide. A message that provides information about the error. The request was denied due to request throttling. For more information about throttling, see Limits in the AWS Key Management Service Developer Guide. The request was denied due to request throttling. For more information about throttling, see Limits in the AWS Key Management Service Developer Guide. The maximum number of shards to return in a single call to When the number of shards to be listed is greater than the value of The maximum number of shards to return in a single call to When the number of shards to be listed is greater than the value of Specify this input parameter to distinguish data streams that have the same name. 
For example, if you create a data stream and then delete it, and you later create another data stream with the same name, you can use this input parameter to specify which of the two streams you want to list the shards for. You cannot specify this parameter if you specify the A message that provides information about the error. The request rate for the stream is too high, or the requested data is too large for the available throughput. Reduce the frequency or size of your requests. For more information, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and Exponential Backoff in AWS in the AWS General Reference. The request rate for the stream is too high, or the requested data is too large for the available throughput. Reduce the frequency or size of your requests. For more information, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and Exponential Backoff in AWS in the AWS General Reference. The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB). The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MiB). The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB). The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MiB). The data blob. 
The data in the blob is both opaque and immutable to Kinesis Data Streams, which does not inspect, interpret, or change the data in the blob in any way. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB). The data blob. The data in the blob is both opaque and immutable to Kinesis Data Streams, which does not inspect, interpret, or change the data in the blob in any way. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MiB). The requested resource could not be found. The stream might not be specified correctly. You can set the starting position to one of the following values: The sequence number of the data record in the shard from which to start streaming. To specify a sequence number, set The time stamp of the data record from which to start reading. To specify a time stamp, set The current retention period, in hours. The current retention period, in hours. Minimum value of 24. Maximum value of 168. The current status of the stream being described. The stream status is one of the following states: The current retention period, in hours. Use this as Use this as The number of milliseconds the read records are from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment. After you call SubscribeToShard, Kinesis Data Streams sends events of this type to your consumer. After you call SubscribeToShard, Kinesis Data Streams sends events of this type over an HTTP/2 connection to your consumer. After you call SubscribeToShard, Kinesis Data Streams sends events of this type to your consumer. For an example of how to handle these events, see Enhanced Fan-Out Using the Kinesis Data Streams API. 
The processing of the request failed because of an unknown error, exception, or failure. This is a tagged union for all of the types of events an enhanced fan-out consumer can receive over HTTP/2 after a call to SubscribeToShard. The ID of the shard you want to subscribe to. To see a list of all the shards for a given stream, use ListShards. The new number of shards. The new number of shards. This value has the following default limits. By default, you cannot do the following: Set this value to more than double your current shard count for a stream. Set this value below half your current shard count for a stream. Set this value to more than 500 shards in a stream (the default limit for shard count per stream is 500 per account per region), unless you request a limit increase. Scale a stream with more than 500 shards down unless you set this value to less than 500 shards. Creates a customization for the Amazon QuickSight subscription associated with your AWS account. Creates Amazon QuickSight customizations for the current AWS Region. Currently, you can add a custom default theme by using the You can create customizations for your AWS account or, if you specify a namespace, for a QuickSight namespace instead. Customizations that apply to a namespace always override customizations that apply to an AWS account. To find out which customizations apply, use the Before you add a theme as the namespace default, make sure that you first share the theme with the namespace. If you don't share it with the namespace, the theme won't be visible to your users even if you use this API operation to make it the default theme. Creates an analysis in Amazon QuickSight. Creates a dashboard from a template. To first create a template, see the CreateTemplate API operation. A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. You can share QuickSight dashboards. With the right permissions, you can create scheduled email reports from them. 
The Creates a dashboard from a template. To first create a template, see the A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. You can share QuickSight dashboards. With the right permissions, you can create scheduled email reports from them. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account. Deletes customizations for the QuickSight subscription on your AWS account. Deletes all Amazon QuickSight customizations in this AWS Region for the specified AWS Account and QuickSight namespace. Deletes an analysis from Amazon QuickSight. You can optionally include a recovery window during which you can restore the analysis. If you don't specify a recovery window value, the operation defaults to 30 days. QuickSight attaches a At any time before recovery window ends, you can use the An analysis that's scheduled for deletion isn't accessible in the QuickSight console. To access it in the console, restore it. Deleting an analysis doesn't delete the dashboards that you publish from it. Deletes the data source permanently. This action breaks all the datasets that reference the deleted data source. Deletes the data source permanently. This operation breaks all the datasets that reference the deleted data source. Deletes a namespace and the users and groups that are associated with the namespace. This is an asynchronous process. Assets including dashboards, analyses, datasets and data sources are not deleted. To delete these assets, you use the APIs for the relevant asset. Deletes a namespace and the users and groups that are associated with the namespace. This is an asynchronous process. Assets including dashboards, analyses, datasets and data sources are not deleted. To delete these assets, you use the API operations for the relevant asset. Describes the customizations associated with your AWS account. 
Describes the customizations associated with the provided AWS account and Amazon QuickSight namespace in an AWS Region. The QuickSight console evaluates which customizations to apply by running this API operation with the To determine what customizations display when you run this command, it can help to visualize the relationship of the entities involved. To run the command in a different AWS Region, you change your region settings. If you're using the AWS CLI, you can use one of the following options: Use command line options. Use named profiles. Run Describes the settings that were used when your QuickSight subscription was first created in this AWS Account. Provides a summary of the metadata for an analysis. Provides the read and write permissions for an analysis. Generates a URL and authorization code that you can embed in your web server code. Before you use this command, make sure that you have configured the dashboards and permissions. Currently, you can use They must be used together. They can be used one time only. They are valid for 5 minutes after you run this command. The resulting user session is valid for 10 hours. For more information, see Embedding Amazon QuickSight Dashboards in the Amazon QuickSight User Guide or Embedding Amazon QuickSight Dashboards in the Amazon QuickSight API Reference. Generates a session URL and authorization code that you can use to embed an Amazon QuickSight read-only dashboard in your web server code. Before you use this command, make sure that you have configured the dashboards and permissions. Currently, you can use They must be used together. They can be used one time only. They are valid for 5 minutes after you run this command. The resulting user session is valid for 10 hours. For more information, see Embedding Amazon QuickSight in the Amazon QuickSight User Guide . Generates a session URL and authorization code that you can embed in your web server code. 
Generates a session URL and authorization code that you can use to embed the Amazon QuickSight console in your web server code. Use Lists Amazon QuickSight analyses that exist in the specified AWS account. Creates an Amazon QuickSight user, whose identity is associated with the AWS Identity and Access Management (IAM) identity or role specified in the request. Restores an analysis. Searches for analyses that belong to the user specified in the filter. Searches for dashboards that belong to a user. Searches for dashboards that belong to a user. Updates customizations associated with the QuickSight subscription on your AWS account. Updates Amazon QuickSight customizations for the current AWS Region. Currently, the only customization you can use is a theme. You can use customizations for your AWS account or, if you specify a namespace, for a QuickSight namespace instead. Customizations that apply to a namespace override customizations that apply to an AWS account. To find out which customizations apply, use the Updates the settings for the Amazon QuickSight subscription in your AWS Account. Updates the Amazon QuickSight settings in your AWS Account. Updates an analysis in Amazon QuickSight. Updates the read and write permissions for an analysis. The default theme for this QuickSight subscription. The customizations associated with your AWS account for QuickSight. The Amazon QuickSight customizations associated with your AWS account or a QuickSight namespace in a specific AWS Region. The name associated with the QuickSight subscription in your AWS account. The \"account name\" you provided for the QuickSight subscription in your AWS account. You create this name when you sign up for QuickSight. It is unique in all of AWS and it appears only in the console when users sign in. The edition of QuickSight that you're currently subscribed to. The edition of QuickSight that you're currently subscribed to: Enterprise edition or Standard edition. 
The default QuickSight namespace for your AWS account. The default QuickSight namespace for your AWS account. Amazon Elasticsearch Service parameters. The ID of the analysis. The Amazon Resource Name (ARN) of the analysis. The descriptive name of the analysis. Status associated with the analysis. Errors associated with the analysis. The ARNs of the datasets of the analysis. The ARN of the theme of the analysis. The time that the analysis was created. The time that the analysis was last updated. Metadata structure for an analysis in Amazon QuickSight The type of the analysis error. The message associated with the analysis error. A metadata error structure for an analysis. The comparison operator that you want to use as a filter, for example The name of the value that you want to use as a filter, for example The value of the named item, in this case A filter that you apply when searching for one or more analyses. The source template for the source entity of the analysis. The source entity of an analysis. The dataset references of the source template of an analysis. The Amazon Resource Name (ARN) of the source template of an analysis. The source template of an analysis. The Amazon Resource Name (ARN) for the analysis. The ID of the analysis. This ID displays in the URL. The name of the analysis. This name is displayed in the QuickSight console. The last known status for the analysis. The time that the analysis was created. The time that the analysis was last updated. The summary metadata that describes an analysis. A resource is already in a state that indicates an action is happening that must complete before a new update can be applied. A resource is already in a state that indicates an operation is happening that must complete before a new update can be applied. The namespace associated with the customization that you're creating. The QuickSight namespace that you want to add customizations to. 
The customizations you're adding to the QuickSight subscription for the AWS account. For example, you could add a default theme by setting The QuickSight customizations you're adding in the current AWS Region. You can add these to an AWS account and a QuickSight namespace. For example, you could add a default theme by setting The customizations you're adding to the QuickSight subscription for the AWS account. The QuickSight customizations you're adding in the current AWS Region. The ID of the AWS account where you are creating an analysis. The ID for the analysis that you're creating. This ID displays in the URL of the analysis. A descriptive name for the analysis that you're creating. This name displays for the analysis in the QuickSight console. The parameter names and override values that you want to use. An analysis can have any parameter type, and some parameters might accept multiple values. A structure that describes the principals and the resource-level permissions on an analysis. You can use the To specify no permissions, omit A source entity to use for the analysis that you're creating. This metadata structure contains details that describe a source template and one or more datasets. The ARN for the theme to apply to the analysis that you're creating. To see the theme in the QuickSight console, make sure that you have access to it. Contains a map of the key-value pairs for the resource tag or tags assigned to the analysis. The ARN for the analysis. The ID of the analysis. The status of the creation of the analysis. The HTTP status of the request. The AWS request ID for this operation. A structure that contains the permissions of the dashboard. You can use this structure for granting permissions with principal and action information. A structure that contains the permissions of the dashboard. You can use this structure for granting permissions by providing a list of IAM action information for each principal ARN. 
To specify no permissions, omit the permissions list. Options for publishing the dashboard when you create it: Options for publishing the dashboard when you create it: The status of the creation of the namespace. This is an asynchronous process. A status of The status of the creation of the namespace. This is an asynchronous process. A status of A set of alternate data source parameters that you want to share for these credentials. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API compares the A set of alternate data source parameters that you want to share for these credentials. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API operation compares the The combination of user name and password that are used as credentials. Errors. Errors associated with this dashboard version. Version number. Version number for this version of the dashboard. The Amazon Resource Numbers (ARNs) for the datasets that are associated with a version of the dashboard. The Amazon Resource Numbers (ARNs) for the datasets that are associated with this version of the dashboard. Description. The ARN of the theme associated with a version of the dashboard. Dashboard version. A set of alternate data source parameters that you want to share for the credentials stored with this data source. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API compares the A set of alternate data source parameters that you want to share for the credentials stored with this data source. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API operation compares the A display name for the dataset. A display name for the date-time parameter. Values. 
The values for the date-time parameter. Date time parameter. A date-time parameter. A display name for the dataset. A display name for the decimal parameter. Values. The values for the decimal parameter. Decimal parameter. A decimal parameter. The ID for the AWS account that you want to delete QuickSight customizations from. The ID for the AWS account that you want to delete QuickSight customizations from in this AWS Region. The namespace associated with the customization that you're deleting. The QuickSight namespace that you're deleting the customizations from. The ID of the AWS account where you want to delete an analysis. The ID of the analysis that you're deleting. A value that specifies the number of days that QuickSight waits before it deletes the analysis. You can't use this parameter with the This option defaults to the value The HTTP status of the request. The Amazon Resource Name (ARN) of the deleted analysis. The ID of the deleted analysis. The date and time that the analysis is scheduled to be deleted. The AWS request ID for this operation. The namespace associated with the customization that you're describing. The QuickSight namespace that you want to describe QuickSight customizations for. The status of the creation of the customization. This is an asynchronous process. A status of The The ID for the AWS account that you want to describe QuickSight customizations for. The ID for the AWS account that you're describing. The namespace associated with the customization that you're describing. The QuickSight namespace that you're describing. The QuickSight customizations that exist in the current AWS Region. The AWS request ID for this operation. The HTTP status of the request. The ID for the AWS account that contains the settings that you want to list. The QuickSight settings for this AWS account. 
This information includes the edition of Amazon QuickSight that you subscribed to (Standard or Enterprise) and the notification email for the QuickSight subscription. In the QuickSight console, the QuickSight subscription is sometimes referred to as a QuickSight \"account\" even though it's technically not an account by itself. Instead, it's a subscription to the QuickSight service for your AWS account. The edition that you subscribe to applies to QuickSight in every AWS Region where you use it. The AWS request ID for this operation. The HTTP status of the request. The ID of the AWS account that contains the analysis whose permissions you're describing. You must be using the AWS account that the analysis is in. The ID of the analysis whose permissions you're describing. The ID is part of the analysis URL. The ID of the analysis whose permissions you're describing. The customizations associated with QuickSight. The Amazon Resource Name (ARN) of the analysis whose permissions you're describing. The AWS request ID for this operation. A structure that describes the principals and the resource-level permissions on an analysis. The HTTP status of the request. The AWS request ID for this operation. The ID for the AWS account that contains the QuickSight namespaces that you want to list. The ID of the AWS account that contains the analysis. You must be using the AWS account that the analysis is in. The ID of the analysis that you're describing. The ID is part of the URL of the analysis. The settings associated with the QuickSight subscription associated with this AWS account. This information includes the edition of Amazon QuickSight that you subscribed to (Standard or Enterprise) and the notification email for the QuickSight subscription. In the QuickSight console, the QuickSight subscription is sometimes referred to as a QuickSight \"account\" even though it is technically not an account, but a subscription in your AWS account. The AWS request ID for this operation. 
A metadata structure that contains summary information for the analysis that you're describing. The HTTP status of the request. The AWS request ID for this operation. The information about the namespace that you're describing. The response includes the namespace ARN, name, AWS Region, creation status, and identity store. The information about the namespace that you're describing. The response includes the namespace ARN, name, AWS Region, creation status, and identity store. The Amazon QuickSight user's Amazon Resource Name (ARN), for use with Active Directory (AD) users or group members Invited nonfederated users IAM users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM federation. The Amazon QuickSight user's Amazon Resource Name (ARN), for use with Active Directory (AD) users or group members Invited nonfederated users IAM users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM federation. Omit this parameter for users in the third group – IAM users and IAM role-based sessions. A single-use URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes. The API provides the URL with an A single-use URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes. The API operation provides the URL with an The ID for the AWS account that contains the QuickSight session that you're embedding. The ID for the AWS account associated with your QuickSight subscription. The entry point for the embedded session. The URL you use to access the embedded session. 
The entry point URL is constrained to the following paths: The Amazon QuickSight user's Amazon Resource Name (ARN), for use with Active Directory (AD) users or group members Invited nonfederated users IAM users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM federation. The Amazon QuickSight user's Amazon Resource Name (ARN), for use with Active Directory (AD) users or group members Invited nonfederated users IAM users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM federation Omit this parameter for users in the third group – IAM users and IAM role-based sessions. A single-use URL that you can put into your server-side web page to embed your QuickSight session. This URL is valid for 5 minutes. The API provides the URL with an A single-use URL that you can put into your server-side web page to embed your QuickSight session. This URL is valid for 5 minutes. The API operation provides the URL with an The principal ID of the group. A group in Amazon QuickSight consists of a set of users. You can use groups to make it easier to manage access and security. Currently, an Amazon QuickSight subscription can't contain more than 500 Amazon QuickSight groups. A group in Amazon QuickSight consists of a set of users. You can use groups to make it easier to manage access and security. A display name for the dataset. The name of the integer parameter. Values. The values for the integer parameter. Integer parameter. An integer parameter. The ID of the AWS account that contains the analyses. A pagination token that can be used in a subsequent request. The maximum number of results to return. Metadata describing each of the analyses that are listed. A pagination token that can be used in a subsequent request. The HTTP status of the request. The AWS request ID for this operation. An error that occurred when the namespace was created. 
An error that occurred when the namespace was created. The error type. DateTime parameters. Date-time parameters. Parameters. A list of QuickSight parameters and the list's override values. Amazon RDS parameters. (Enterprise edition only) The name of the custom permissions profile that you want to assign to this user. Currently, custom permissions profile names are assigned to permissions profiles in the QuickSight console. You use this API to assign the named set of permissions to a QuickSight user. Customizing permissions in the QuickSight UI allows you to control a user's access to the following operations: QuickSight custom permissions are applied through IAM policies. Therefore, they override the permissions typically granted by assigning QuickSight users to one of the default security cohorts (admin, author, reader) in QuickSight. This feature is available only to QuickSight Enterprise edition subscriptions that use SAML 2.0-Based Federation for Single Sign-On (SSO). (Enterprise edition only) The name of the custom permissions profile that you want to assign to this user. Customized permissions allow you to control a user's access by restricting access to the following operations: Create and update data sources Create and update datasets Create and update email reports Subscribe to email reports To add custom permissions to an existing user, use A set of custom permissions includes any combination of these restrictions. Currently, you need to create the profile names for custom permission sets by using the QuickSight console. Then, you use the QuickSight custom permissions are applied through IAM policies. Therefore, they override the permissions typically granted by assigning QuickSight users to one of the default security cohorts in QuickSight (admin, author, reader). This feature is available only to QuickSight Enterprise edition subscriptions that use SAML 2.0-Based Federation for Single Sign-On (SSO). The user name. The user's user name. 
The action to grant or revoke permissions on, for example The IAM action to grant or revoke permissions on, for example Permission for the resource. The ID of the AWS account that contains the analysis. The ID of the analysis that you're restoring. The HTTP status of the request. The Amazon Resource Name (ARN) of the analysis that you're restoring. The ID of the analysis that you're restoring. The AWS request ID for this operation. A physical table type for an S3 data source. The ID of the AWS account that contains the analyses that you're searching for. The structure for the search filters that you want to apply to your search. A pagination token that can be used in a subsequent request. The maximum number of results to return. Metadata describing the analyses that you searched for. A pagination token that can be used in a subsequent request. The HTTP status of the request. The AWS request ID for this operation. The maximum number of results to be returned per request. The maximum number of results to be returned per request. A display name for the dataset. A display name for a string parameter. Values. The values of a string parameter. String parameter. A string parameter. Time when this was created. A template object. A template is an entity in QuickSight that encapsulates the metadata required to create an analysis and that you can use to create a dashboard. A template adds a layer of abstraction by using placeholders to replace the dataset associated with the analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template. You can share templates across AWS accounts by allowing users in other AWS accounts to create a template or a dashboard from an existing template. A template object. A template is an entity in QuickSight that encapsulates the metadata required to create an analysis and that you can use to create a dashboard. 
A template adds a layer of abstraction by using placeholders to replace the dataset associated with an analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template. You can share templates across AWS accounts by allowing users in other AWS accounts to create a template or a dashboard from an existing template. Errors associated with the template. Errors associated with this template version. The version number of the template. The version number of the template version. Schema of the dataset identified by the placeholder. The idea is that any dashboard created from the template should be bound to new datasets matching the same schema described through this API. . Schema of the dataset identified by the placeholder. Any dashboard created from this template should be bound to new datasets matching the same schema described through this API operation. The Amazon Resource Name (ARN) of the analysis or template which was used to create this template. The Amazon Resource Name (ARN) of an analysis or template that was used to create this template. The ARN of the theme associated with this version of the template. A version of a template. The type of theme, based on how it was created. Valid values include: Summary information about a theme. The namespace associated with the customization that you're updating. The namespace that you want to update QuickSight customizations for. The customizations you want to update in QuickSight. The QuickSight customizations you're updating in the current AWS Region. The customizations associated with your QuickSight subscription. The QuickSight customizations you're updating in the current AWS Region. The ID for the AWS account that contains the QuickSight namespaces that you want to list. The ID for the AWS account that contains the QuickSight settings that you want to list. 
Email address used to send notifications regarding administration of QuickSight. The email address that you want QuickSight to send notifications to regarding your AWS account or QuickSight subscription. The ID of the AWS account that contains the analysis whose permissions you're updating. You must be using the AWS account that the analysis is in. The ID of the analysis whose permissions you're updating. The ID is part of the analysis URL. A structure that describes the permissions to add and the principal to add them to. A structure that describes the permissions to remove and the principal to remove them from. The Amazon Resource Name (ARN) of the analysis that you updated. The ID of the analysis that you updated permissions for. A structure that describes the principals and the resource-level permissions on an analysis. The AWS request ID for this operation. The HTTP status of the request. The ID of the AWS account that contains the analysis that you're updating. The ID for the analysis that you're updating. This ID displays in the URL of the analysis. A descriptive name for the analysis that you're updating. This name displays for the analysis in the QuickSight console. The parameter names and override values that you want to use. An analysis can have any parameter type, and some parameters might accept multiple values. A source entity to use for the analysis that you're updating. This metadata structure contains details that describe a source template and one or more datasets. The Amazon Resource Name (ARN) for the theme to apply to the analysis that you're creating. To see the theme in the QuickSight console, make sure that you have access to it. The ARN of the analysis that you're updating. The ID of the analysis. The update status of the last update that was made to the analysis. The HTTP status of the request. The AWS request ID for this operation. 
Options for publishing the dashboard when you create it: Options for publishing the dashboard when you create it: The Amazon QuickSight role of the user. The user role can be one of the following: The Amazon QuickSight role of the user. The role can be one of the following default security cohorts: The name of the QuickSight role is invisible to the user except for the console screens dealing with permissions. The name of the custom permissions profile that you want to assign to this user. Currently, custom permissions profile names are assigned to permissions profiles in the QuickSight console. You use this API to assign the named set of permissions to a QuickSight user. (Enterprise edition only) The name of the custom permissions profile that you want to assign to this user. Customized permissions allow you to control a user's access by restricting access to the following operations: Create and update data sources Create and update datasets Create and update email reports Subscribe to email reports A set of custom permissions includes any combination of these restrictions. Currently, you need to create the profile names for custom permission sets by using the QuickSight console. Then, you use the QuickSight custom permissions are applied through IAM policies. Therefore, they override the permissions typically granted by assigning QuickSight users to one of the default security cohorts in QuickSight (admin, author, reader). This feature is available only to QuickSight Enterprise edition subscriptions that use SAML 2.0-Based Federation for Single Sign-On (SSO). The custom permissions profile associated with this user. A registered user of Amazon QuickSight. Currently, an Amazon QuickSight subscription can't contain more than 20 million users. A registered user of Amazon QuickSight. Deletes one or more worlds in a batch operation. Cancels a simulation job batch. 
When you cancel a simulation job batch, you are also cancelling all of the active simulation jobs created as part of the batch. Cancels the specified export job. Cancels the specified world generator job. Creates a simulation job. After 90 days, simulation jobs expire and will be deleted. They will no longer be accessible. Creates a world export job. Creates worlds using the specified template. Creates a world template. Deletes a simulation application. Deletes a world template. Describes a simulation job batch. Describes a world. Describes a world export job. Describes a world generation job. Describes a world template. Gets the world template body. Lists all tags on an AWS RoboMaker resource. Lists world export jobs. Lists world generator jobs. Lists world templates. Lists worlds. Updates a simulation application. Updates a world template. A list of Amazon Resource Names (arns) that correspond to worlds to delete. A list of unprocessed worlds associated with the call. These worlds were not deleted. The Amazon Resource Name (arn) of the world export job to cancel. The Amazon Resource Name (arn) of the world generator job to cancel. The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB of memory. You are only billed for the SU utilization you consume up to the maximum value provided. The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB of memory. You are only billed for the SU utilization you consume up to the maximum value provided. The default is 15. Compute information for the simulation job. The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB of memory. You are only billed for the SU utilization you consume up to the maximum value provided. 
The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB of memory. You are only billed for the SU utilization you consume up to the maximum value provided. The default is 15. Compute information for the simulation job. The name of the data source. Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The S3 bucket where the data files are located. A list of Amazon Resource Names (arns) that correspond to worlds to export. The list of S3 keys identifying the data source files. The IAM role that the world export process uses to access the Amazon S3 bucket and put the export. A map that contains tag keys and tag values that are attached to the world export job. Information about a data source. The name of the data source. The Amazon Resource Name (ARN) of the world export job. The S3 bucket where the data files are located. The status of the world export job. The world export job request is pending. The world export job is running. The world export job completed. The world export job failed. See The world export job was cancelled. The world export job is being cancelled. The list of S3 keys identifying the data source files. The time, in milliseconds since the epoch, when the world export job was created. The failure code of the world export job if it failed: Internal service error. The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed. The specified resource could not be found. The request was throttled. An input parameter in the request is not valid. Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The IAM role that the world export process uses to access the Amazon S3 bucket and put the export. A map that contains tag keys and tag values that are attached to the world export job. 
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The Amazon Resource Name (arn) of the world template describing the worlds you want to create. Information about the world count. A map that contains tag keys and tag values that are attached to the world generator job. The Amazon Resource Name (ARN) of the world generator job. The status of the world generator job. The world generator job request is pending. The world generator job is running. The world generator job completed. The world generator job failed. See Some worlds did not generate. The world generator job was cancelled. The world generator job is being cancelled. The time, in milliseconds since the epoch, when the world generator job was created. The failure code of the world generator job if it failed: Internal service error. The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed. The specified resource could not be found. The request was throttled. An input parameter in the request is not valid. Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The Amazon Resource Name (arn) of the world template. Information about the world count. A map that contains tag keys and tag values that are attached to the world generator job. Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The name of the world template. The world template body. The location of the world template. A map that contains tag keys and tag values that are attached to the world template. The Amazon Resource Name (ARN) of the world template. Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The time, in milliseconds since the epoch, when the world template was created. The name of the world template. A map that contains tag keys and tag values that are attached to the world template. 
The name of the data source. The S3 bucket where the data files are located. The list of S3 keys identifying the data source files. Information about a data source. The name of the data source. The S3 bucket where the data files are located. The list of S3 keys identifying the data source files. Information about a data source. The Amazon Resource Name (arn) of the world template you want to delete. The Amazon Resource Name (arn) of the world export job to describe. The Amazon Resource Name (ARN) of the world export job. The status of the world export job. The world export job request is pending. The world export job is running. The world export job completed. The world export job failed. See The world export job was cancelled. The world export job is being cancelled. The time, in milliseconds since the epoch, when the world export job was created. The failure code of the world export job if it failed: Internal service error. The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed. The specified resource could not be found. The request was throttled. An input parameter in the request is not valid. The reason why the world export job failed. Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. A list of Amazon Resource Names (arns) that correspond to worlds to be exported. The IAM role that the world export process uses to access the Amazon S3 bucket and put the export. A map that contains tag keys and tag values that are attached to the world export job. The Amazon Resource Name (arn) of the world generation job to describe. The Amazon Resource Name (ARN) of the world generation job. The status of the world generation job: The world generation job request is pending. The world generation job is running. The world generation job completed. The world generation job failed. See Some worlds did not generate. The world generation job was cancelled. 
The world generation job is being cancelled. The time, in milliseconds since the epoch, when the world generation job was created. The failure code of the world generation job if it failed: Internal service error. The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed. The specified resource could not be found. The request was throttled. An input parameter in the request is not valid. The reason why the world generation job failed. Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The Amazon Resource Name (arn) of the world template. Information about the world count. Summary information about finished worlds. A map that contains tag keys and tag values that are attached to the world generation job. The Amazon Resource Name (arn) of the world you want to describe. The Amazon Resource Name (arn) of the world. The Amazon Resource Name (arn) of the world generation job that generated the world. The world template. The time, in milliseconds since the epoch, when the world was created. A map that contains tag keys and tag values that are attached to the world. The Amazon Resource Name (arn) of the world template you want to describe. The Amazon Resource Name (ARN) of the world template. Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. The name of the world template. The time, in milliseconds since the epoch, when the world template was created. The time, in milliseconds since the epoch, when the world template was last updated. A map that contains tag keys and tag values that are attached to the world template. The total number of failures. The worlds that failed. Information about worlds that failed. The total number of finished worlds. A list of worlds that succeeded. Information about worlds that failed. Information about worlds that finished. The Amazon Resource Name (arn) of the world template. 
The Amazon Resource Name (arn) of the world generator job. The world template body. The If the previous paginated request did not return all of the remaining results, the response object's The If the previous paginated request did not return all of the remaining results, the response object's The This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. If the previous paginated request did not return all of the remaining results, the response object's This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. The If the previous paginated request did not return all of the remaining results, the response object's The If the previous paginated request did not return all of the remaining results, the response object's A list of robot application summaries that meet the criteria of the request. If the previous paginated request did not return all of the remaining results, the response object's If the previous paginated request did not return all of the remaining results, the response object's When this parameter is used, Optional filters to limit results. The filter names A list of robots that meet the criteria of the request. If the previous paginated request did not return all of the remaining results, the response object's The version qualifier of the simulation application. If the previous paginated request did not return all of the remaining results, the response object's When this parameter is used, Optional list of filters to limit results. The filter name A list of simulation application summaries that meet the criteria of the request. 
If the previous paginated request did not return all of the remaining results, the response object's If the previous paginated request did not return all of the remaining results, the response object's When this parameter is used, Optional filters to limit results. A list of simulation job batch summaries. If the previous paginated request did not return all of the remaining results, the response object's If the previous paginated request did not return all of the remaining results, the response object's When this parameter is used, Optional filters to limit results. The filter names A list of simulation job summaries that meet the criteria of the request. If the previous paginated request did not return all of the remaining results, the response object's The AWS RoboMaker Amazon Resource Name (ARN) with tags to be listed. A list of robot application summaries that meet the criteria of the request. The The list of all tags added to the specified resource. The If the previous paginated request did not return all of the remaining results, the response object's When this parameter is used, When this parameter is used, Optional filters to limit results. The filter names Optional filters to limit results. You can use A list of robots that meet the criteria of the request. Summary information for world export jobs. The If the previous paginated request did not return all of the remaining results, the response object's The version qualifier of the simulation application. The If the previous paginated request did not return all of the remaining results, the response object's When this parameter is used, When this parameter is used, Optional list of filters to limit results. The filter name Optional filters to limit results. You can use A list of simulation application summaries that meet the criteria of the request. Summary information for world generator jobs. 
The If the previous paginated request did not return all of the remaining results, the response object's The If the previous paginated request did not return all of the remaining results, the response object's When this parameter is used, Optional filters to limit results. When this parameter is used, A list of simulation job batch summaries. Summary information for templates. The If the previous paginated request did not return all of the remaining results, the response object's The This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. If the previous paginated request did not return all of the remaining results, the response object's When this parameter is used, When this parameter is used, Optional filters to limit results. The filter names Optional filters to limit results. You can use A list of simulation job summaries that meet the criteria of the request. Summary information for worlds. The The AWS RoboMaker Amazon Resource Name (ARN) with tags to be listed. The list of all tags added to the specified resource. If the previous paginated request did not return all of the remaining results, the response object's The launch configuration for the simulation application. A list of world configurations. Information about a simulation application configuration. The Amazon S3 bucket name. The list of S3 keys identifying the data source files. Information about a template location. The Amazon Resource Name (ARN) of the template. The time, in milliseconds since the epoch, when the template was created. The time, in milliseconds since the epoch, when the template was last updated. The name of the template. Summary information for a template. The Amazon Resource Name (arn) of the world template to update. The name of the template. The world template body. The location of the world template. The Amazon Resource Name (arn) of the world template. The name of the world template. 
The time, in milliseconds since the epoch, when the world template was created. The time, in milliseconds since the epoch, when the world template was last updated. The world generated by Simulation WorldForge. Configuration information for a world. The number of unique floorplans. The number of unique interiors per floorplan. The number of worlds that will be created. You can configure the number of unique floorplans and the number of unique interiors for each floor plan. For example, if you want 1 world with 20 unique interiors, you set If you set The Amazon Resource Name (ARN) of the world export job. The status of the world export job. The world export job request is pending. The world export job is running. The world export job completed. The world export job failed. See The world export job was cancelled. The world export job is being cancelled. The time, in milliseconds since the epoch, when the world export job was created. A list of worlds. Information about a world export job. The failure code of the world export job if it failed: Internal service error. The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed. The specified resource could not be found. The request was throttled. An input parameter in the request is not valid. The sample reason why the world failed. World errors are aggregated. A sample is used as the The number of failed worlds. Information about a failed world. The Amazon Resource Name (ARN) of the world generator job. The Amazon Resource Name (arn) of the world template. The time, in milliseconds since the epoch, when the world generator job was created. The status of the world generator job: The world generator job request is pending. The world generator job is running. The world generator job completed. The world generator job failed. See Some worlds did not generate. The world generator job was cancelled. The world generator job is being cancelled. 
Information about the world count. The number of worlds that were generated. The number of worlds that failed. Information about a world generator job. The Amazon Resource Name (ARN) of the world. The time, in milliseconds since the epoch, when the world was created. The Amazon Resource Name (arn) of the world generation job. The Amazon Resource Name (arn) of the world template. Information about a world. This section provides documentation for the AWS RoboMaker API operations.acm.amazonaws.com
). Permissions can be revoked with the DeletePermission action and listed with the ListPermissions action.acm.amazonaws.com
). These permissions allow ACM to issue and renew ACM certificates that reside in the same AWS account as the CA.
"
},
"DeleteCertificateAuthority":{
"name":"DeleteCertificateAuthority",
@@ -79,7 +79,7 @@
{"shape":"InvalidArnException"},
{"shape":"InvalidStateException"}
],
- "documentation":"CreatePermission
to grant permissions for ACM to carry out automatic certificate renewals.DISABLED
. CREATING
). You can also delete it if the CA has been created but you haven't yet imported the signed certificate into ACM Private CA (that is, the status of the CA is PENDING_CERTIFICATE
). DELETED
. However, the CA won't be permanently deleted until the restoration period has passed. By default, if you do not set the PermanentDeletionTimeInDays
parameter, the CA remains restorable for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority action returns the time remaining in the restoration window of a private CA in the DELETED
state. To restore an eligible CA, call the RestoreCertificateAuthority action.DISABLED
. CREATING
). You can also delete it if the CA has been created but you haven't yet imported the signed certificate into ACM Private CA (that is, the status of the CA is PENDING_CERTIFICATE
). DELETED
. However, the CA won't be permanently deleted until the restoration period has passed. By default, if you do not set the PermanentDeletionTimeInDays
parameter, the CA remains restorable for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority action returns the time remaining in the restoration window of a private CA in the DELETED
state. To restore an eligible CA, call the RestoreCertificateAuthority action.
"
+ },
+ "DeletePolicy":{
+ "name":"DeletePolicy",
+ "http":{
+ "method":"POST",
+ "requestUri":"/"
+ },
+ "input":{"shape":"DeletePolicyRequest"},
+ "errors":[
+ {"shape":"ConcurrentModificationException"},
+ {"shape":"InvalidArnException"},
+ {"shape":"InvalidStateException"},
+ {"shape":"LockoutPreventedException"},
+ {"shape":"RequestFailedException"},
+ {"shape":"ResourceNotFoundException"}
+ ],
+ "documentation":"CreatePermission
to grant permissions for ACM to carry out automatic certificate renewals.
"
},
"DescribeCertificateAuthority":{
"name":"DescribeCertificateAuthority",
@@ -108,7 +125,7 @@
{"shape":"ResourceNotFoundException"},
{"shape":"InvalidArnException"}
],
- "documentation":"
"
+ "documentation":"CREATING
- ACM Private CA is creating your private certificate authority.PENDING_CERTIFICATE
- The certificate is pending. You must use your ACM Private CA-hosted or on-premises root or subordinate CA to sign your private CA CSR and then import it into PCA. ACTIVE
- Your private CA is active.DISABLED
- Your private CA has been disabled.EXPIRED
- Your private CA certificate has expired.FAILED
- Your private CA has failed. Your CA can fail because of problems such as a network outage or backend AWS failure or other errors. A failed CA can never return to the pending state. You must create a new CA. DELETED
- Your private CA is within the restoration period, after which it is permanently deleted. The length of time remaining in the CA's restoration period is also included in this action's output.
"
},
"DescribeCertificateAuthorityAuditReport":{
"name":"DescribeCertificateAuthorityAuditReport",
@@ -123,7 +140,7 @@
{"shape":"InvalidArnException"},
{"shape":"InvalidArgsException"}
],
- "documentation":"CREATING
- ACM Private CA is creating your private certificate authority.PENDING_CERTIFICATE
- The certificate is pending. You must use your ACM Private CA-hosted or on-premises root or subordinate CA to sign your private CA CSR and then import it into PCA. ACTIVE
- Your private CA is active.DISABLED
- Your private CA has been disabled.EXPIRED
- Your private CA certificate has expired.FAILED
- Your private CA has failed. Your CA can fail because of problems such as a network outage or backend AWS failure or other errors. A failed CA can never return to the pending state. You must create a new CA. DELETED
- Your private CA is within the restoration period, after which it is permanently deleted. The length of time remaining in the CA's restoration period is also included in this action's output.ResourceNotFoundException
.
"
},
"ImportCertificateAuthorityCertificate":{
"name":"ImportCertificateAuthorityCertificate",
@@ -192,7 +225,7 @@
{"shape":"MalformedCertificateException"},
{"shape":"CertificateMismatchException"}
],
- "documentation":"
"
+ "documentation":"
"
},
"IssueCertificate":{
"name":"IssueCertificate",
@@ -210,7 +243,7 @@
{"shape":"InvalidArgsException"},
{"shape":"MalformedCSRException"}
],
- "documentation":"
"
},
"ListTags":{
"name":"ListTags",
@@ -256,7 +289,25 @@
{"shape":"InvalidArnException"},
{"shape":"InvalidStateException"}
],
- "documentation":"CreatePermission
to grant permissions for ACM to carry out automatic certificate renewals.
"
},
"RestoreCertificateAuthority":{
"name":"RestoreCertificateAuthority",
@@ -270,7 +321,7 @@
{"shape":"InvalidStateException"},
{"shape":"InvalidArnException"}
],
- "documentation":"DELETED
state. You can restore a CA during the period that you defined in the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthority action. Currently, you can specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, by default you can restore the CA at any time in a 30 day period. You can check the time remaining in the restoration period of a private CA in the DELETED
state by calling the DescribeCertificateAuthority or ListCertificateAuthorities actions. The status of a restored CA is set to its pre-deletion status when the RestoreCertificateAuthority action returns. To change its status to ACTIVE
, call the UpdateCertificateAuthority action. If the private CA was in the PENDING_CERTIFICATE
state at deletion, you must use the ImportCertificateAuthorityCertificate action to import a certificate authority into the private CA before it can be activated. You cannot restore a CA after the restoration period has ended.DELETED
state. You can restore a CA during the period that you defined in the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthority action. Currently, you can specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, by default you can restore the CA at any time in a 30 day period. You can check the time remaining in the restoration period of a private CA in the DELETED
state by calling the DescribeCertificateAuthority or ListCertificateAuthorities actions. The status of a restored CA is set to its pre-deletion status when the RestoreCertificateAuthority action returns. To change its status to ACTIVE
, call the UpdateCertificateAuthority action. If the private CA was in the PENDING_CERTIFICATE
state at deletion, you must use the ImportCertificateAuthorityCertificate action to import a certificate authority into the private CA before it can be activated. You cannot restore a CA after the restoration period has ended.CRLGenerated
and MisconfiguredCRLBucket
. For more information, see Supported CloudWatch Metrics.ACTIVE
or DISABLED
state before you can update it. You can disable a private CA that is in the ACTIVE
state or make a CA that is in the DISABLED
state active again.ACTIVE
or DISABLED
state before you can update it. You can disable a private CA that is in the ACTIVE
state or make a CA that is in the DISABLED
state active again. 12345678-1234-1234-1234-123456789012
.PermanentDeletionTimeInDays
parameter of the DeleteCertificateAuthorityRequest action. PermanentDeletionTimeInDays
parameter of the DeleteCertificateAuthorityRequest action. SigningAlgorithm
parameter used to sign certificates when they are issued.arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
.arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. true
. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. Your S3 bucket policy must give write permission to ACM Private CA.
openssl crl -inform DER -text -in crl_path -noout
true
. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. Your S3 bucket policy must give write permission to ACM Private CA.
openssl crl -inform DER -text -in crl_path -noout
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/01234567-89ab-cdef-0123-0123456789ab
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
NextToken
argument is not valid. Use the token returned from your previous call to ListCertificateAuthorities.NextToken
argument is not valid. Use the token returned from your previous call to ListCertificateAuthorities.arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
openssl req -new -newkey rsa:2048 -days 365 -keyout private/test_cert_priv_key.pem -out csr/test_cert_.csr
usr_cert
block in the configuration file contains your X509 version 3 extensions. openssl req -new -config openssl_rsa.cnf -extensions usr_cert -newkey rsa:2048 -days 365 -keyout private/test_cert_priv_key.pem -out csr/test_cert_.csr
openssl req -new -newkey rsa:2048 -days 365 -keyout private/test_cert_priv_key.pem -out csr/test_cert_.csr
usr_cert
block in the configuration file contains your X509 version 3 extensions. openssl req -new -config openssl_rsa.cnf -extensions usr_cert -newkey rsa:2048 -days 365 -keyout private/test_cert_priv_key.pem -out csr/test_cert_.csr
SigningAlgorithm
parameter used to sign a CSR.EndEntityCertificate/V1
template.TemplateArn
values are supported by ACM Private CA:
EndEntityCertificate/V1
template. For CA certificates, you should choose the shortest path length that meets your needs. The path length is indicated by the PathLenN portion of the ARN, where N is the CA depth.TemplateArn
values are supported by ACM Private CA:
NextToken
element is sent in the response. Use this NextToken
value in a subsequent request to retrieve additional items.arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
You can get a private CA's ARN by running the ListCertificateAuthorities action.arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
You can get a private CA's ARN by running the ListCertificateAuthorities action.arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
acm.amazonaws.com
.IssueCertificate
, GetCertificate
, and ListPermissions
). Permissions can be assigned with the CreatePermission action, removed with the DeletePermission action, and listed with the ListPermissions action.IssueCertificate
, GetCertificate
, and ListPermissions
). Permissions can be assigned with the CreatePermission action, removed with the DeletePermission action, and listed with the ListPermissions action.PutPolicy
action returns an InvalidPolicyException
. For information about IAM policy and statement structure, see Overview of JSON Policies.arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
openssl x509 -in file_path -text -noout
openssl x509 -in file_path -text -noout
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
Type
, below.Value
parameter represents days, months, or years.Value
parameter, an integer. Supported validity types include those listed below. Type definitions with values include a sample input value and the resulting output. END_DATE
: The specific date and time when the certificate will expire, expressed using UTCTime (YYMMDDHHMMSS) or GeneralizedTime (YYYYMMDDHHMMSS) format. When UTCTime is used, if the year field (YY) is greater than or equal to 50, the year is interpreted as 19YY. If the year field is less than 50, the year is interpreted as 20YY.
ABSOLUTE
: The specific date and time when the certificate will expire, expressed in seconds since the Unix Epoch.
DAYS
, MONTHS
, YEARS
: The relative time from the moment of issuance until the certificate will expire, expressed in days, months, or years. DAYS
, issued on 10/12/2020 at 12:34:54 UTC:
"
}
},
- "documentation":"AddTags
updates its value.AddTags
updates its value.AWSELB
, follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie.AWSELB
, follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie.OutOfService
state. Then, the load balancer attempts to equally balance the traffic among its remaining Availability Zones.OutOfService
state. Then, the load balancer attempts to equally balance the traffic among its remaining Availability Zones.AccessLogs
, ConnectionDraining
, and CrossZoneLoadBalancing
by either enabling or disabling them. Or, you can modify the load balancer attribute ConnectionSettings
by specifying an idle connection timeout value for your load balancer.AccessLogs
, ConnectionDraining
, and CrossZoneLoadBalancing
by either enabling or disabling them. Or, you can modify the load balancer attribute ConnectionSettings
by specifying an idle connection timeout value for your load balancer.RegisterInstanceWithLoadBalancer
completes when the request has been registered. Instance registration takes a little time to complete. To check the state of the registered instances, use DescribeLoadBalancers or DescribeInstanceHealth.OutOfService
state. If an Availability Zone is added to the load balancer later, any instances registered with the load balancer move to the InService
state.RegisterInstanceWithLoadBalancer
completes when the request has been registered. Instance registration takes a little time to complete. To check the state of the registered instances, use DescribeLoadBalancers or DescribeInstanceHealth.OutOfService
state. If an Availability Zone is added to the load balancer later, any instances registered with the load balancer move to the InService
state.SetLoadBalancerPoliciesForBackendServer
to enable the policies, use the PolicyNames
parameter to list the policies that you want to enable.SetLoadBalancerPoliciesForBackendServer
to enable the policies, use the PolicyNames
parameter to list the policies that you want to enable.
"
},
"Value":{
"shape":"AdditionalAttributeValue",
- "documentation":"elb.http.desyncmitigationmode
- Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor
, defensive
, and strictest
. The default is defensive
.internal
to create a load balancer with a DNS name that resolves to private IP addresses.internal
to create a load balancer with a DNS name that resolves to private IP addresses.InstanceProtocol
must be at the same protocol.InstancePort
whose InstanceProtocol
is secure, (HTTPS or SSL), the listener's InstanceProtocol
must also be secure.InstancePort
whose InstanceProtocol
is HTTP or TCP, the listener's InstanceProtocol
must be HTTP or TCP.InstancePort
whose InstanceProtocol
is secure, (HTTPS or SSL), the listener's InstanceProtocol
must also be secure.InstancePort
whose InstanceProtocol
is HTTP or TCP, the listener's InstanceProtocol
must be HTTP or TCP.forward
, fixed-response
, or redirect
.Type
is forward
. If you specify both ForwardConfig
and TargetGroupArn
, you can specify only one target group using ForwardConfig
and it must be the same target group specified in TargetGroupArn
.forward
, fixed-response
, or redirect
, and it must be the last action to be performed.http-request-method
, host-header
, path-pattern
, and source-ip
, and zero or more of the following conditions: http-header
and query-string
.http-request-method
, host-header
, path-pattern
, and source-ip
. Each rule can also optionally include one or more of each of the following conditions: http-header
and query-string
.
"
+ "documentation":"instance
- Targets are specified by instance ID. This is the default value. If the target group protocol is UDP or TCP_UDP, the target type must be instance
.ip
- Targets are specified by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.lambda
- The target group contains a single Lambda function.
"
}
}
},
@@ -2055,7 +2055,7 @@
"members":{
"Key":{
"shape":"LoadBalancerAttributeKey",
- "documentation":"instance
- Targets are specified by instance ID. This is the default value.ip
- Targets are specified by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.lambda
- The target group contains a single Lambda function.
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true
or false
. The default is false
.access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true
or false
. The default is false
.
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true
) or routed to targets (false
). The default is false
.routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The value is true
or false
. The default is true
. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
"
+ "documentation":"load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The value is true
or false
. The default is false
.
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true
or false
. The default is false
.access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true
or false
. The default is false
.
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.routing.http.desync_mitigation_mode
- Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor
, defensive
, and strictest
. The default is defensive
.routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true
) or routed to targets (false
). The default is false
.routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The value is true
or false
. The default is true
. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
"
},
"Value":{
"shape":"LoadBalancerAttributeValue",
@@ -2313,7 +2313,7 @@
},
"Matcher":{
"shape":"Matcher",
- "documentation":"load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The value is true
or false
. The default is false
.Values
if the rule contains only host-header
and path-pattern
conditions. Otherwise, you can use HostHeaderConfig
for host-header
conditions and PathPatternConfig
for path-pattern
conditions.Field
is host-header
, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters.
Field
is path-pattern
, you can specify a single path pattern (for example, /img/*). A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters.
"
+ "documentation":"Field
is host-header
or path-pattern
. Alternatively, to specify multiple host names or multiple path patterns, use HostHeaderConfig
or PathPatternConfig
.Field
is host-header
and you are not using HostHeaderConfig
, you can specify a single host name (for example, my.example.com) in Values
. A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters.
Field
is path-pattern
and you are not using PathPatternConfig
, you can specify a single path pattern (for example, /img/*) in Values
. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters.
"
},
"HostHeaderConfig":{
"shape":"HostHeaderConditionConfig",
@@ -2624,7 +2624,7 @@
"documentation":"Field
is source-ip
.http-request-method
, host-header
, path-pattern
, and source-ip
. Each rule can also optionally include one or more of each of the following conditions: http-header
and query-string
.
deregistration_delay.timeout_seconds
- The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining
to unused
. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.stickiness.enabled
- Indicates whether sticky sessions are enabled. The value is true
or false
. The default is false
.stickiness.type
- The type of sticky sessions. The possible values are lb_cookie
for Application Load Balancers or source_ip
for Network Load Balancers.
load_balancing.algorithm.type
- The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin
or least_outstanding_requests
. The default is round_robin
.slow_start.duration_seconds
- The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). Slow start mode is disabled by default.stickiness.lb_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
lambda.multi_value_headers.enabled
- Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true
or false
. The default is false
. If the value is false
and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.
"
+ "documentation":"proxy_protocol_v2.enabled
- Indicates whether Proxy Protocol version 2 is enabled. The value is true
or false
. The default is false
.
deregistration_delay.timeout_seconds
- The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining
to unused
. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.stickiness.enabled
- Indicates whether sticky sessions are enabled. The value is true
or false
. The default is false
.stickiness.type
- The type of sticky sessions. The possible values are lb_cookie
for Application Load Balancers or source_ip
for Network Load Balancers.
load_balancing.algorithm.type
- The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin
or least_outstanding_requests
. The default is round_robin
.slow_start.duration_seconds
- The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).stickiness.lb_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
lambda.multi_value_headers.enabled
- Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true
or false
. The default is false
. If the value is false
and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.
"
},
"Value":{
"shape":"TargetGroupAttributeValue",
diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml
index aabbc5491ba1..2e64c6d2d02e 100644
--- a/services/elasticsearch/pom.xml
+++ b/services/elasticsearch/pom.xml
@@ -21,7 +21,7 @@
proxy_protocol_v2.enabled
- Indicates whether Proxy Protocol version 2 is enabled. The value is true
or false
. The default is false
.CreateStream
is an asynchronous operation. Upon receiving a CreateStream
request, Kinesis Data Streams immediately returns and sets the stream status to CREATING
. After the stream is created, Kinesis Data Streams sets the stream status to ACTIVE
. You should perform read and write operations only on an ACTIVE
stream. LimitExceededException
when making a CreateStream
request when you try to do one of the following:
CREATING
state at any point in time.DescribeStream
to check the stream status, which is returned in StreamStatus
.CreateStream
is an asynchronous operation. Upon receiving a CreateStream
request, Kinesis Data Streams immediately returns and sets the stream status to CREATING
. After the stream is created, Kinesis Data Streams sets the stream status to ACTIVE
. You should perform read and write operations only on an ACTIVE
stream. LimitExceededException
when making a CreateStream
request when you try to do one of the following:
CREATING
state at any point in time.DescribeStream
to check the stream status, which is returned in StreamStatus
.ShardIterator
parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. It might take multiple calls to get to a portion of the shard that contains records.NextShardIterator
. Specify the shard iterator returned in NextShardIterator
in subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't return more data and GetRecords returns null
in NextShardIterator
. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.Limit
parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit. The maximum number of records that can be returned per call is 10,000.ProvisionedThroughputExceededException
. If there is insufficient provisioned throughput on the stream, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException
. GetRecords doesn't return any data when it throws an exception. For this reason, we recommend that you wait 1 second between calls to GetRecords. However, it's possible that the application will get exceptions for longer than 1 second.MillisBehindLatest
response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Data Streams Developer Guide).ApproximateArrivalTimestamp
, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side time stamp, whereas a client-side time stamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time stamp accuracy, or that the time stamp is always increasing. For example, records in a shard or across a stream might have time stamps that are out of order.ShardIterator
parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. It might take multiple calls to get to a portion of the shard that contains records.NextShardIterator
. Specify the shard iterator returned in NextShardIterator
in subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't return more data and GetRecords returns null
in NextShardIterator
. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.Limit
parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit. The maximum number of records that can be returned per call is 10,000.ProvisionedThroughputExceededException
. If there is insufficient provisioned throughput on the stream, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException
. GetRecords doesn't return any data when it throws an exception. For this reason, we recommend that you wait 1 second between calls to GetRecords. However, it's possible that the application will get exceptions for longer than 1 second.MillisBehindLatest
response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Data Streams Developer Guide).ApproximateArrivalTimestamp
, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side time stamp, whereas a client-side time stamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time stamp accuracy, or that the time stamp is always increasing. For example, records in a shard or across a stream might have time stamps that are out of order.ShardIteratorType
parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER
shard iterator type. Alternatively, the parameter can read right after the sequence number by using the AFTER_SEQUENCE_NUMBER
shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP
to read records from an arbitrary point in time, TRIM_HORIZON
to cause ShardIterator
to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST
so that you always read the most recent data in the shard. NextShardIterator
. A new shard iterator is returned by every GetRecords request in NextShardIterator
, which you use in the ShardIterator
parameter of the next GetRecords request. ProvisionedThroughputExceededException
. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer Guide.ShardIteratorType
parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER
shard iterator type. Alternatively, the parameter can read right after the sequence number by using the AFTER_SEQUENCE_NUMBER
shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP
to read records from an arbitrary point in time, TRIM_HORIZON
to cause ShardIterator
to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST
so that you always read the most recent data in the shard. NextShardIterator
. A new shard iterator is returned by every GetRecords request in NextShardIterator
, which you use in the ShardIterator
parameter of the next GetRecords request. ProvisionedThroughputExceededException
. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer Guide.MergeShards
is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Data Streams Developer Guide.ACTIVE
state, you can call MergeShards
. If a stream is in the CREATING
, UPDATING
, or DELETING
state, MergeShards
returns a ResourceInUseException
. If the specified stream does not exist, MergeShards
returns a ResourceNotFoundException
. StreamStatus
.MergeShards
is an asynchronous operation. Upon receiving a MergeShards
request, Amazon Kinesis Data Streams immediately returns a response and sets the StreamStatus
to UPDATING
. After the operation is completed, Kinesis Data Streams sets the StreamStatus
to ACTIVE
. Read and write operations continue to work while the stream is in the UPDATING
state. MergeShards
request. MergeShards
, or SplitShard, you receive a LimitExceededException
. MergeShards
has a limit of five transactions per second per account.MergeShards
is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Data Streams Developer Guide.ACTIVE
state, you can call MergeShards
. If a stream is in the CREATING
, UPDATING
, or DELETING
state, MergeShards
returns a ResourceInUseException
. If the specified stream does not exist, MergeShards
returns a ResourceNotFoundException
. StreamStatus
.MergeShards
is an asynchronous operation. Upon receiving a MergeShards
request, Amazon Kinesis Data Streams immediately returns a response and sets the StreamStatus
to UPDATING
. After the operation is completed, Kinesis Data Streams sets the StreamStatus
to ACTIVE
. Read and write operations continue to work while the stream is in the UPDATING
state. MergeShards
request. MergeShards
, or SplitShard, you receive a LimitExceededException
. MergeShards
has a limit of five transactions per second per account.PutRecord
to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.ExplicitHashKey
parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.PutRecord
returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.SequenceNumberForOrdering
parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.PutRecord
request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord
throws ProvisionedThroughputExceededException
. PutRecord
to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second.ExplicitHashKey
parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.PutRecord
returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.SequenceNumberForOrdering
parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.PutRecord
request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord
throws ProvisionedThroughputExceededException
. PutRecords
request). Use this operation to send data into the stream for data ingestion and processing. PutRecords
request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.Records
, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.Records
array may include an optional parameter, ExplicitHashKey
, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.PutRecords
response includes an array of response Records
. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records
array always includes the same number of records as the request array.Records
array includes both successfully and unsuccessfully processed records. Kinesis Data Streams attempts to process all records in each PutRecords
request. A single record failure does not stop the processing of subsequent records.ShardId
and SequenceNumber
values. The ShardId
parameter identifies the shard in the stream where the record is stored. The SequenceNumber
parameter is an identifier assigned to the put record, unique to all records in the stream.ErrorCode
and ErrorMessage
values. ErrorCode
reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException
or InternalFailure
. ErrorMessage
provides more detailed information about the ProvisionedThroughputExceededException
exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.PutRecords
request). Use this operation to send data into the stream for data ingestion and processing. PutRecords
request can support up to 500 records. Each record in the request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second.Records
, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.Records
array may include an optional parameter, ExplicitHashKey
, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.PutRecords
response includes an array of response Records
. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records
array always includes the same number of records as the request array.Records
array includes both successfully and unsuccessfully processed records. Kinesis Data Streams attempts to process all records in each PutRecords
request. A single record failure does not stop the processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering of records. If you need to read records in the same order they are written to the stream, use PutRecord instead of PutRecords
, and write to the same shard.ShardId
and SequenceNumber
values. The ShardId
parameter identifies the shard in the stream where the record is stored. The SequenceNumber
parameter is an identifier assigned to the put record, unique to all records in the stream.ErrorCode
and ErrorMessage
values. ErrorCode
reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException
or InternalFailure
. ErrorMessage
provides more detailed information about the ProvisionedThroughputExceededException
exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.CREATING
status at the same time. Registering a 6th consumer while there are 5 in a CREATING
status results in a LimitExceededException
.SplitShard
is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested. SplitShard
when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard
to increase stream capacity, so that more Kinesis Data Streams applications can simultaneously read data from the stream for real-time processing. ShardToSplit
and NewStartingHashKey
parameters that are specified in the SplitShard
request.SplitShard
is an asynchronous operation. Upon receiving a SplitShard
request, Kinesis Data Streams immediately returns a response and sets the stream status to UPDATING
. After the operation is completed, Kinesis Data Streams sets the stream status to ACTIVE
. Read and write operations continue to work while the stream is in the UPDATING
state. DescribeStream
to check the status of the stream, which is returned in StreamStatus
. If the stream is in the ACTIVE
state, you can call SplitShard
. If a stream is in CREATING
or UPDATING
or DELETING
states, DescribeStream
returns a ResourceInUseException
.DescribeStream
returns a ResourceNotFoundException
. If you try to create more shards than are authorized for your account, you receive a LimitExceededException
. LimitExceededException
. SplitShard
has a limit of five transactions per second per account.SplitShard
is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested. SplitShard
when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard
to increase stream capacity, so that more Kinesis Data Streams applications can simultaneously read data from the stream for real-time processing. ShardToSplit
and NewStartingHashKey
parameters that are specified in the SplitShard
request.SplitShard
is an asynchronous operation. Upon receiving a SplitShard
request, Kinesis Data Streams immediately returns a response and sets the stream status to UPDATING
. After the operation is completed, Kinesis Data Streams sets the stream status to ACTIVE
. Read and write operations continue to work while the stream is in the UPDATING
state. DescribeStream
to check the status of the stream, which is returned in StreamStatus
. If the stream is in the ACTIVE
state, you can call SplitShard
. If a stream is in CREATING
or UPDATING
or DELETING
states, DescribeStream
returns a ResourceInUseException
.DescribeStream
returns a ResourceNotFoundException
. If you try to create more shards than are authorized for your account, you receive a LimitExceededException
. LimitExceededException
. SplitShard
has a limit of five transactions per second per account.SubscribeToShard
again to renew the subscription if you want to continue to receive records.SubscribeToShard
per second per ConsumerARN
. If your call succeeds, and then you call the operation again less than 5 seconds later, the second call generates a ResourceInUseException. If you call the operation a second time more than 5 seconds after the first call succeeds, the second call succeeds and the first connection gets shut down.ConsumerARN
parameter and the shard you specify in the ShardId
parameter. After the connection is successfully established, Kinesis Data Streams pushes records from the shard to the consumer over this connection. Before you call this operation, call RegisterStreamConsumer to register the consumer with Kinesis Data Streams.SubscribeToShard
call succeeds, your consumer starts receiving events of type SubscribeToShardEvent over the HTTP/2 connection for up to 5 minutes, after which time you need to call SubscribeToShard
again to renew the subscription if you want to continue to receive records.SubscribeToShard
per second per registered consumer per shard. For example, if you have a 4000 shard stream and two registered stream consumers, you can make one SubscribeToShard
request per second for each combination of shard and registered consumer, allowing you to subscribe both consumers to all 4000 shards in one second. SubscribeToShard
again with the same ConsumerARN
and ShardId
within 5 seconds of a successful call, you'll get a ResourceInUseException
. If you call SubscribeToShard
5 seconds or more after a successful call, the first connection will expire and the second call will take over the subscription.UPDATING
. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE
. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING
.
UPDATING
. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE
. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING
.
AddTagsToStream
.CreateStream
.ALL
\" disables every metric.
IncomingBytes
IncomingRecords
OutgoingBytes
OutgoingRecords
WriteProvisionedThroughputExceeded
ReadProvisionedThroughputExceeded
IteratorAgeMilliseconds
ALL
ALL
\" disables every metric.
IncomingBytes
IncomingRecords
OutgoingBytes
OutgoingRecords
WriteProvisionedThroughputExceeded
ReadProvisionedThroughputExceeded
IteratorAgeMilliseconds
ALL
ALL
\" enables every metric.
IncomingBytes
IncomingRecords
OutgoingBytes
OutgoingRecords
WriteProvisionedThroughputExceeded
ReadProvisionedThroughputExceeded
IteratorAgeMilliseconds
ALL
ALL
\" enables every metric.
IncomingBytes
IncomingRecords
OutgoingBytes
OutgoingRecords
WriteProvisionedThroughputExceeded
ReadProvisionedThroughputExceeded
IteratorAgeMilliseconds
ALL
ALL
\" enhances every metric.
IncomingBytes
IncomingRecords
OutgoingBytes
OutgoingRecords
WriteProvisionedThroughputExceeded
ReadProvisionedThroughputExceeded
IteratorAgeMilliseconds
ALL
ALL
\" enhances every metric.
IncomingBytes
IncomingRecords
OutgoingBytes
OutgoingRecords
WriteProvisionedThroughputExceeded
ReadProvisionedThroughputExceeded
IteratorAgeMilliseconds
ALL
InvalidArgumentException
.InvalidArgumentException
. The default value is 10,000.ListShards
. The minimum value you can specify for this parameter is 1, and the maximum is 1,000, which is also the default.MaxResults
, the response contains a NextToken
value that you can use in a subsequent call to ListShards
to list the next set of shards.ListShards
. The minimum value you can specify for this parameter is 1, and the maximum is 10,000, which is also the default.MaxResults
, the response contains a NextToken
value that you can use in a subsequent call to ListShards
to list the next set of shards.NextToken
parameter.AT_SEQUENCE_NUMBER
: Start streaming from the position denoted by the sequence number specified in the SequenceNumber
field.AFTER_SEQUENCE_NUMBER
: Start streaming right after the position denoted by the sequence number specified in the SequenceNumber
field.AT_TIMESTAMP
: Start streaming from the position denoted by the time stamp specified in the Timestamp
field.TRIM_HORIZON
: Start streaming at the last untrimmed record in the shard, which is the oldest data record in the shard.LATEST
: Start streaming just after the most recent record in the shard, so that you always read the most recent data in the shard.StartingPosition
to AT_SEQUENCE_NUMBER
or AFTER_SEQUENCE_NUMBER
.StartingPosition
to Type AT_TIMESTAMP
. A time stamp is the Unix epoch date with precision in milliseconds. For example, 2016-04-04T19:58:46.480-00:00
or 1459799926.480
. If a record with this exact time stamp does not exist, records will be streamed from the next (later) record. If the time stamp is older than the current trim horizon, records will be streamed from the oldest untrimmed data record (TRIM_HORIZON
).
"
},
"RetentionPeriodHours":{
- "shape":"PositiveIntegerObject",
+ "shape":"RetentionPeriodHours",
"documentation":"CREATING
- The stream is being created. Kinesis Data Streams immediately returns and sets StreamStatus
to CREATING
.DELETING
- The stream is being deleted. The specified stream is in the DELETING
state until Kinesis Data Streams completes the deletion.ACTIVE
- The stream exists and is ready for read and write operations or deletion. You should perform read and write operations only on an ACTIVE
stream.UPDATING
- Shards in the stream are being merged or split. Read and write operations continue to work while the stream is in the UPDATING
state.StartingSequenceNumber
in the next call to SubscribeToShard.SequenceNumber
in the next call to SubscribeToShard, with StartingPosition
set to AT_SEQUENCE_NUMBER
or AFTER_SEQUENCE_NUMBER
. Use ContinuationSequenceNumber
for checkpointing because it captures your shard progress even when no data is written to the shard.
"
},
"ScalingType":{
"shape":"ScalingType",
diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml
index d7d935630e5b..bd1ba035a906 100644
--- a/services/kinesisanalytics/pom.xml
+++ b/services/kinesisanalytics/pom.xml
@@ -22,7 +22,7 @@
CreateAccountCustomization
or UpdateAccountCustomization
API operation. To further customize QuickSight by removing QuickSight sample assets and videos for all new users, see Customizing QuickSight in the Amazon QuickSight User Guide.DescribeAccountCustomization
API operation.CreateDashboard
, DescribeDashboard
, and ListDashboardsByUser
API operations act on the dashboard entity. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account. CreateTemplate
API operation.DeletionTime
stamp to the response that specifies the end of the recovery window. At the end of the recovery window, QuickSight deletes the analysis permanently.RestoreAnalysis
API operation to remove the DeletionTime
stamp and cancel the deletion of the analysis. The analysis remains visible in the API until it's deleted, so you can describe it but you can't make a template from it.Resolved
flag included.
"
},
"DescribeAccountSettings":{
"name":"DescribeAccountSettings",
@@ -582,6 +619,41 @@
],
"documentation":"AWS Account
- The AWS account exists at the top of the hierarchy. It has the potential to use all of the AWS Regions and AWS Services. When you subscribe to QuickSight, you choose one AWS Region to use as your home region. That's where your free SPICE capacity is located. You can use QuickSight in any supported AWS Region. AWS Region
- In each AWS Region where you sign in to QuickSight at least once, QuickSight acts as a separate instance of the same service. If you have a user directory, it resides in us-east-1, which is the US East (N. Virginia). Generally speaking, these users have access to QuickSight in any AWS Region, unless they are constrained to a namespace.
aws configure
to change your default AWS Region. Use Enter to key the same settings for your keys. For more information, see Configuring the AWS CLI.Namespace
- A QuickSight namespace is a partition that contains users and assets (data sources, datasets, dashboards, and so on). To access assets that are in a specific namespace, users and groups must also be part of the same namespace. People who share a namespace are completely isolated from users and assets in other namespaces, even if they are in the same AWS account and AWS Region.Applied customizations
- Within an AWS Region, a set of QuickSight customizations can apply to an AWS account or to a namespace. Settings that you apply to a namespace override settings that you apply to an AWS Account. All settings are isolated to a single AWS Region. To apply them in other AWS Regions, run the CreateAccountCustomization
command in each AWS Region where you want to apply the same customizations. GetDashboardEmbedURL
only from the server, not from the user's browser. The following rules apply to the combination of URL and authorization code:
GetDashboardEmbedURL
only from the server, not from the user's browser. The following rules apply to the combination of URL and authorization code:
GetSessionEmbedUrl
where you want to provide an authoring portal that allows users to create data sources, datasets, analyses, and dashboards. The users who access an embedded QuickSight console need to belong to the author or admin security cohort. If you want to restrict permissions to some of these features, add a custom permissions profile to the user with the UpdateUser
API operation. Use RegisterUser
API operation to add a new user with a custom permission profile attached. For more information, see the following sections in the Amazon QuickSight User Guide:DescribeAccountCustomization
API operation. \"Operator\": \"StringEquals\"
.\"Name\": \"QUICKSIGHT_USER\"
.QUICKSIGHT_USER
, that you want to use as a filter, for example \"Value\"
. An example is \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\"
.AccountCustomization
to the midnight theme (DefaultTheme=\"arn:aws:quicksight::aws:theme/MIDNIGHT\"
) or to a custom theme (DefaultTheme=\"arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639\"
).AccountCustomization
to the midnight theme: \"AccountCustomization\": { \"DefaultTheme\": \"arn:aws:quicksight::aws:theme/MIDNIGHT\" }.
. Or, you could add a custom theme by specifying \"AccountCustomization\": { \"DefaultTheme\": \"arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639\" }
. Permissions
structure to grant permissions by providing a list of AWS Identity and Access Management (IAM) action information for each principal listed by Amazon Resource Name (ARN). Permissions
.
"
+ "documentation":"AvailabilityStatus
for AdHocFilteringOption
- This status can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED
by default. AvailabilityStatus
for ExportToCSVOption
- This status can be either ENABLED
or DISABLED
. The visual option to export data to .csv format isn't enabled when this is set to DISABLED
. This option is ENABLED
by default. VisibilityState
for SheetControlsOption
- This visibility state can be either COLLAPSED
or EXPANDED
. This option is COLLAPSED
by default.
"
},
"ThemeArn":{
"shape":"Arn",
@@ -2731,7 +3137,7 @@
},
"CreationStatus":{
"shape":"NamespaceStatus",
- "documentation":"AvailabilityStatus
for AdHocFilteringOption
- This status can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED
by default. AvailabilityStatus
for ExportToCSVOption
- This status can be either ENABLED
or DISABLED
. The visual option to export data to .CSV format isn't enabled when this is set to DISABLED
. This option is ENABLED
by default. VisibilityState
for SheetControlsOption
- This visibility state can be either COLLAPSED
or EXPANDED
. This option is COLLAPSED
by default. CREATED
means that your namespace is ready to use. If an error occurs, it indicates if the process is retryable
or non-retryable
. In the case of a non-retryable error, refer to the error message for follow-up actions.CREATED
means that your namespace is ready to use. If an error occurs, it indicates if the process is retryable
or non-retryable
. In the case of a non-retryable error, refer to the error message for follow-up tasks.DataSourceParameters
structure that's in the request with the structures in the AlternateDataSourceParameters
allowlist. If the structures are an exact match, the request is allowed to use the new data source with the existing credentials. If the AlternateDataSourceParameters
list is null, the DataSourceParameters
originally used with these Credentials
is automatically allowed.DataSourceParameters
structure that's in the request with the structures in the AlternateDataSourceParameters
allowlist. If the structures are an exact match, the request is allowed to use the new data source with the existing credentials. If the AlternateDataSourceParameters
list is null, the DataSourceParameters
originally used with these Credentials
is automatically allowed.DataSourceParameters
structure that's in the request with the structures in the AlternateDataSourceParameters
allowlist. If the structures are an exact match, the request is allowed to use the credentials from this existing data source. If the AlternateDataSourceParameters
list is null, the Credentials
originally used with this DataSourceParameters
are automatically allowed.DataSourceParameters
structure that's in the request with the structures in the AlternateDataSourceParameters
allowlist. If the structures are an exact match, the request is allowed to use the credentials from this existing data source. If the AlternateDataSourceParameters
list is null, the Credentials
originally used with this DataSourceParameters
are automatically allowed.ForceDeleteWithoutRecovery
option in the same API call. The default value is 30.NoForceDeleteWithoutRecovery
. To immediately delete the analysis, add the ForceDeleteWithoutRecovery
option. You can't restore an analysis after it's deleted. CREATED
means that your customization is ready to use.Resolved
flag works with the other parameters to determine which view of QuickSight customizations is returned. You can add this flag to your command to use the same view that QuickSight uses to identify which customizations to apply to the console. Omit this flag, or set it to no-resolved
, to reveal customizations that are configured at different levels. DescribeNamespace
also works for namespaces that are in the process of being created. For incomplete namespaces, this API lists the namespace error types and messages associated with the creation process.DescribeNamespace
also works for namespaces that are in the process of being created. For incomplete namespaces, this API operation lists the namespace error types and messages associated with the creation process.QUICKSIGHT
identity type. You can use this for any Amazon QuickSight users in your account (readers, authors, or admins) authenticated as one of the following:
",
+ "documentation":"QUICKSIGHT
identity type. You can use this for any Amazon QuickSight users in your account (readers, authors, or admins) authenticated as one of the following:
auth_code
value that enables one (and only one) sign-on to a user session that is valid for 10 hours. auth_code
value that enables one (and only one) sign-on to a user session that is valid for 10 hours.
",
"location":"querystring",
"locationName":"entry-point"
},
@@ -5513,7 +6068,7 @@
},
"UserArn":{
"shape":"Arn",
- "documentation":"/start
/start/analyses
/start/dashboards
/start/favorites
/dashboards/DashboardId
- where DashboardId
is the actual ID key from the QuickSight console URL of the dashboard/analyses/AnalysisId
- where AnalysisId
is the actual ID key from the QuickSight console URL of the analysisQUICKSIGHT
identity type. You can use this for any Amazon QuickSight users in your account (readers, authors, or admins) authenticated as one of the following:
",
+ "documentation":"QUICKSIGHT
identity type. You can use this for any type of Amazon QuickSight users in your account (readers, authors, or admins). They need to be authenticated as one of the following:
auth_code
value that enables one (and only one) sign-on to a user session that is valid for 10 hours. auth_code
value that enables one (and only one) sign-on to a user session that is valid for 10 hours.
UpdateUser
instead.RegisterUser
API operation to assign the named set of permissions to a QuickSight user. \"quicksight:DescribeDashboard\"
.\"quicksight:DescribeDashboard\"
.QUICKSIGHT
and CUSTOM
.
"
+ "documentation":"AvailabilityStatus
for AdHocFilteringOption
- This status can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED
by default. AvailabilityStatus
for ExportToCSVOption
- This status can be either ENABLED
or DISABLED
. The visual option to export data to .csv format isn't enabled when this is set to DISABLED
. This option is ENABLED
by default. VisibilityState
for SheetControlsOption
- This visibility state can be either COLLAPSED
or EXPANDED
. This option is COLLAPSED
by default.
"
},
"ThemeArn":{
"shape":"Arn",
@@ -9841,11 +10668,11 @@
},
"Role":{
"shape":"UserRole",
- "documentation":"AvailabilityStatus
for AdHocFilteringOption
- This status can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED
by default. AvailabilityStatus
for ExportToCSVOption
- This status can be either ENABLED
or DISABLED
. The visual option to export data to .CSV format isn't enabled when this is set to DISABLED
. This option is ENABLED
by default. VisibilityState
for SheetControlsOption
- This visibility state can be either COLLAPSED
or EXPANDED
. This option is COLLAPSED
by default.
"
+ "documentation":"READER
: A user who has read-only access to dashboards.AUTHOR
: A user who can create data sources, datasets, analyses, and dashboards.ADMIN
: A user who is an author, who can also manage Amazon QuickSight settings.
READER
: A user who has read-only access to dashboards.AUTHOR
: A user who can create data sources, datasets, analyses, and dashboards.ADMIN
: A user who is an author, who can also manage Amazon QuickSight settings.
RegisterUser
API operation to assign the named set of permissions to a QuickSight user.
"
},
- "s3Keys":{
- "shape":"S3Keys",
- "documentation":"failureCode
for more information.
"
+ },
+ "clientRequestToken":{
+ "shape":"ClientRequestToken",
+ "documentation":"
"
+ },
+ "createdAt":{
+ "shape":"CreatedAt",
+ "documentation":"failureCode
for more information.
"
+ },
+ "clientRequestToken":{
+ "shape":"ClientRequestToken",
+ "documentation":"
"
+ },
+ "createdAt":{
+ "shape":"CreatedAt",
+ "documentation":"failureCode
and failureReason
for more information.
"
+ },
+ "failureReason":{
+ "shape":"GenericString",
+ "documentation":"
"
+ },
+ "createdAt":{
+ "shape":"CreatedAt",
+ "documentation":"failureCode
for more information.
"
+ },
+ "failureReason":{
+ "shape":"GenericString",
+ "documentation":"nextToken
value returned from a previous paginated ListDeploymentJobs
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListDeploymentJobs
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
value to include in a future ListDeploymentJobs
request. When the results of a ListDeploymentJobs
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListDeploymentJobs
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
value returned from a previous paginated ListFleets
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListFleets
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
value to include in a future ListDeploymentJobs
request. When the results of a ListFleets
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListFleets
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
value returned from a previous paginated ListRobotApplications
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListRobotApplications
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListRobotApplications
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListRobots
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. ListRobots
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListRobots
request with the returned nextToken
value. This value can be between 1 and 200. If this parameter is not used, then ListRobots
returns up to 200 results and a nextToken
value if applicable. status
and fleetName
are supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters, but they must be for the same named item. For example, if you are looking for items with the status Registered
or the status Available
.nextToken
parameter value is set to a token. To retrieve the next set of results, call ListRobots
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListSimulationApplications
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. ListSimulationApplications
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListSimulationApplications
request with the returned nextToken
value. This value can be between 1 and 100. If this parameter is not used, then ListSimulationApplications
returns up to 100 results and a nextToken
value if applicable. name
is supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters.nextToken
parameter value is set to a token. To retrieve the next set of results, call ListSimulationApplications
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListSimulationJobBatches
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. ListSimulationJobBatches
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListSimulationJobBatches
request with the returned nextToken
value. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListSimulationJobBatches
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListSimulationJobs
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. ListSimulationJobs
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListSimulationJobs
request with the returned nextToken
value. This value can be between 1 and 1000. If this parameter is not used, then ListSimulationJobs
returns up to 1000 results and a nextToken
value if applicable. status
and simulationApplicationName
and robotApplicationName
are supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters, but they must be for the same named item. For example, if you are looking for items with the status Preparing
or the status Running
.nextToken
parameter value is set to a token. To retrieve the next set of results, call ListSimulationJobs
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
value to include in a future ListRobotApplications
request. When the results of a ListRobotApplications
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return. nextToken
value returned from a previous paginated ListRobots
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListWorldExportJobs
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. ListRobots
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListRobots
request with the returned nextToken
value. This value can be between 1 and 200. If this parameter is not used, then ListRobots
returns up to 200 results and a nextToken
value if applicable. ListWorldExportJobs
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListWorldExportJobs
request with the returned nextToken
value. This value can be between 1 and 100. If this parameter is not used, then ListWorldExportJobs
returns up to 100 results and a nextToken
value if applicable. status
and fleetName
are supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters, but they must be for the same named item. For example, if you are looking for items with the status Registered
or the status Available
.generationJobId
and templateId
.nextToken
value to include in a future ListRobots
request. When the results of a ListRobot
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListWorldExportJobsRequest
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
value returned from a previous paginated ListSimulationApplications
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListWorldGenerationJobsRequest
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. ListSimulationApplications
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListSimulationApplications
request with the returned nextToken
value. This value can be between 1 and 100. If this parameter is not used, then ListSimulationApplications
returns up to 100 results and a nextToken
value if applicable. ListWorldGeneratorJobs
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListWorldGeneratorJobs
request with the returned nextToken
value. This value can be between 1 and 100. If this parameter is not used, then ListWorldGeneratorJobs
returns up to 100 results and a nextToken
value if applicable. name
is supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters.status
and templateId
.nextToken
value to include in a future ListSimulationApplications
request. When the results of a ListRobot
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListWorldGeneratorJobsRequest
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
value returned from a previous paginated ListSimulationJobBatches
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListWorldTemplates
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. ListSimulationJobBatches
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListSimulationJobBatches
request with the returned nextToken
value. ListWorldTemplates
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListWorldTemplates
request with the returned nextToken
value. This value can be between 1 and 100. If this parameter is not used, then ListWorldTemplates
returns up to 100 results and a nextToken
value if applicable. nextToken
value to include in a future ListSimulationJobBatches
request. When the results of a ListSimulationJobBatches
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListWorldTemplates
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. nextToken
value returned from a previous paginated ListSimulationJobs
request where maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken
value. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListWorlds
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. ListSimulationJobs
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListSimulationJobs
request with the returned nextToken
value. This value can be between 1 and 1000. If this parameter is not used, then ListSimulationJobs
returns up to 1000 results and a nextToken
value if applicable. ListWorlds
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListWorlds
request with the returned nextToken
value. This value can be between 1 and 100. If this parameter is not used, then ListWorlds
returns up to 100 results and a nextToken
value if applicable. status
and simulationApplicationName
and robotApplicationName
are supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters, but they must be for the same named item. For example, if you are looking for items with the status Preparing
or the status Running
.status
.nextToken
value to include in a future ListSimulationJobs
request. When the results of a ListRobot
request exceed maxResults
, this value can be used to retrieve the next page of results. This value is null
when there are no more results to return. nextToken
parameter value is set to a token. To retrieve the next set of results, call ListWorlds
again and assign that token to the request object's nextToken
parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. floorplanCount = 1
and interiorCountPerFloorplan = 20
. This will result in 20 worlds (floorplanCount
* interiorCountPerFloorplan)
. floorplanCount = 4
and interiorCountPerFloorplan = 5
, there will be 20 worlds with 5 unique floor plans.
"
+ },
+ "createdAt":{
+ "shape":"CreatedAt",
+ "documentation":"failureCode
for more information.
"
+ },
+ "sampleFailureReason":{
+ "shape":"GenericString",
+ "documentation":"sampleFailureReason
.
"
+ },
+ "worldCount":{
+ "shape":"WorldCount",
+ "documentation":"failureCode
for more information.