diff --git a/.changes/2.1581.0.json b/.changes/2.1581.0.json new file mode 100644 index 0000000000..656afa88f3 --- /dev/null +++ b/.changes/2.1581.0.json @@ -0,0 +1,22 @@ +[ + { + "type": "feature", + "category": "CloudWatchLogs", + "description": "Update LogSamples field in Anomaly model to be a list of LogEvent" + }, + { + "type": "feature", + "category": "EC2", + "description": "This release adds the new DescribeMacHosts API operation for getting information about EC2 Mac Dedicated Hosts. Users can now see the latest macOS versions that their underlying Apple Mac can support without needing to be updated." + }, + { + "type": "feature", + "category": "Finspace", + "description": "Adding new attributes readWrite and onDemand to dataview models for Database Maintenance operations." + }, + { + "type": "feature", + "category": "ManagedBlockchainQuery", + "description": "Introduces a new API for Amazon Managed Blockchain Query: ListFilteredTransactionEvents." + } +] \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d0c6d054c..467aa38d2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,13 @@ # Changelog for AWS SDK for JavaScript - + +## 2.1581.0 +* feature: CloudWatchLogs: Update LogSamples field in Anomaly model to be a list of LogEvent +* feature: EC2: This release adds the new DescribeMacHosts API operation for getting information about EC2 Mac Dedicated Hosts. Users can now see the latest macOS versions that their underlying Apple Mac can support without needing to be updated. +* feature: Finspace: Adding new attributes readWrite and onDemand to dataview models for Database Maintenance operations. +* feature: ManagedBlockchainQuery: Introduces a new API for Amazon Managed Blockchain Query: ListFilteredTransactionEvents. + ## 2.1580.0 * feature: CloudFormation: This release supports a new API ListStackSetAutoDeploymentTargets, which provides auto-deployment configuration as a describable resource. Customers can now view the specific combinations of regions and OUs that are being auto-deployed. * feature: KMS: Adds the ability to use the default policy name by omitting the policyName parameter in calls to PutKeyPolicy and GetKeyPolicy diff --git a/README.md b/README.md index 3370d920d5..3dd8f30194 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ require('aws-sdk/lib/maintenance_mode_message').suppress = true; To use the SDK in the browser, simply add the following script tag to your HTML pages: - + You can also build a custom browser SDK with your specified set of AWS services. This can allow you to reduce the SDK's size, specify different API versions of diff --git a/apis/cloudformation-2010-05-15.normal.json b/apis/cloudformation-2010-05-15.normal.json index babbdc4e55..90038c8600 100644 --- a/apis/cloudformation-2010-05-15.normal.json +++ b/apis/cloudformation-2010-05-15.normal.json @@ -55,7 +55,7 @@ "shape": "TypeNotFoundException" } ], - "documentation": "
Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide.
Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration
to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.
Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide.
Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.
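A minimal sketch of this activate-then-configure flow with the SDK's v2 JavaScript client; the publisher ID, type name, and configuration body below are placeholders, not values from this diff:

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

async function activateAndConfigure() {
  // Activate the public extension in this account and Region.
  const { Arn } = await cfn.activateType({
    Type: 'RESOURCE',
    PublisherId: '0123456789abcdef0123456789abcdef', // placeholder publisher ID
    TypeName: 'Example::Service::Resource'           // placeholder type name
  }).promise();

  // Then supply account-level configuration for the activated extension.
  await cfn.setTypeConfiguration({
    TypeArn: Arn,
    Configuration: JSON.stringify({ Endpoint: 'https://api.example.com' }) // placeholder schema
  }).promise();
}

activateAndConfigure().catch(console.error);
```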
", "idempotent": true }, "BatchDescribeTypeConfigurations": { @@ -437,7 +437,7 @@ "shape": "TypeNotFoundException" } ], - "documentation": "Marks an extension or extension version as DEPRECATED
in the CloudFormation registry, removing it from active use. Deprecated extensions or extension versions cannot be used in CloudFormation operations.
To deregister an entire extension, you must individually deregister all active versions of that extension. If an extension has only a single active version, deregistering that version results in the extension itself being deregistered and marked as deprecated in the registry.
You can't deregister the default version of an extension if there are other active versions of that extension. If you do deregister the default version of an extension, the extension type itself is deregistered as well and marked as deprecated.
To view the deprecation status of an extension or extension version, use DescribeType
.
Marks an extension or extension version as DEPRECATED
in the CloudFormation registry, removing it from active use. Deprecated extensions or extension versions cannot be used in CloudFormation operations.
To deregister an entire extension, you must individually deregister all active versions of that extension. If an extension has only a single active version, deregistering that version results in the extension itself being deregistered and marked as deprecated in the registry.
You can't deregister the default version of an extension if there are other active versions of that extension. If you do deregister the default version of an extension, the extension type itself is deregistered as well and marked as deprecated.
To view the deprecation status of an extension or extension version, use DescribeType.
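A sketch of deregistering a single version with the v2 JavaScript client (type name and version ID are placeholders); repeat per active version to deprecate the whole extension:

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

cfn.deregisterType({
  Type: 'RESOURCE',
  TypeName: 'Example::Service::Resource', // placeholder type name
  VersionId: '00000001'                   // placeholder; omit only when a single active version remains
}).promise()
  .then(() => console.log('version deprecated'))
  .catch(console.error);
```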
", "idempotent": true }, "DescribeAccountLimits": { @@ -556,7 +556,7 @@ "shape": "CFNRegistryException" } ], - "documentation": "Returns information about a CloudFormation extension publisher.
If you don't supply a PublisherId
, and you have registered as an extension publisher, DescribePublisher
returns information about your own publisher account.
For more information about registering as a publisher, see:
Publishing extensions to make them available for public use in the CloudFormation CLI User Guide
Returns information about a CloudFormation extension publisher.
If you don't supply a PublisherId
, and you have registered as an extension publisher, DescribePublisher
returns information about your own publisher account.
For more information about registering as a publisher, see:
Publishing extensions to make them available for public use in the CloudFormation CLI User Guide
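For example, calling the operation with no PublisherId from the v2 JavaScript client returns your own publisher record, assuming you have registered as a publisher:

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

// No PublisherId: describes the publisher account you registered, if any.
cfn.describePublisher({}).promise()
  .then(({ PublisherId, PublisherStatus }) => console.log(PublisherId, PublisherStatus))
  .catch(console.error);
```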
Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.
To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher
.
Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.
To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher.
", "idempotent": true }, "RecordHandlerProgress": { @@ -1418,7 +1418,7 @@ "shape": "CFNRegistryException" } ], - "documentation": "Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes:
Validating the extension schema.
Determining which handlers, if any, have been specified for the extension.
Making the extension available for use in your account.
For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.
You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType
to deregister specific extension versions if necessary.
Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request.
Once you have registered a private extension in your account and Region, use SetTypeConfiguration
to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.
Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes:
Validating the extension schema.
Determining which handlers, if any, have been specified for the extension.
Making the extension available for use in your account.
For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.
You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary.
Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request.
Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.
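A sketch of the register-then-poll flow described above, using the v2 JavaScript client (the type name and S3 package location are placeholders):

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

async function registerAndWait() {
  const { RegistrationToken } = await cfn.registerType({
    Type: 'RESOURCE',
    TypeName: 'Example::Service::Resource',                 // placeholder type name
    SchemaHandlerPackage: 's3://example-bucket/handler.zip' // placeholder package location
  }).promise();

  // Poll DescribeTypeRegistration until the request leaves IN_PROGRESS.
  for (;;) {
    const { ProgressStatus, Description } =
      await cfn.describeTypeRegistration({ RegistrationToken }).promise();
    if (ProgressStatus !== 'IN_PROGRESS') {
      return console.log(ProgressStatus, Description);
    }
    await new Promise(resolve => setTimeout(resolve, 5000));
  }
}

registerAndWait().catch(console.error);
```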
", "idempotent": true }, "RollbackStack": { @@ -1473,7 +1473,7 @@ "shape": "TypeNotFoundException" } ], - "documentation": "Specifies the configuration data for a registered CloudFormation extension, in the given account and Region.
To view the current configuration data for an extension, refer to the ConfigurationSchema
element of DescribeType
. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.
It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide.
Specifies the configuration data for a registered CloudFormation extension, in the given account and Region.
To view the current configuration data for an extension, refer to the ConfigurationSchema
element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.
It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide.
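A sketch of passing a dynamic reference instead of a literal credential, per the recommendation above (the type name and SSM parameter path are placeholders):

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

cfn.setTypeConfiguration({
  Type: 'RESOURCE',
  TypeName: 'Example::Service::Resource', // placeholder type name
  Configuration: JSON.stringify({
    // Resolved at runtime from version 1 of a SecureString SSM parameter.
    ApiKey: '{{resolve:ssm-secure:/example/api-key:1}}'
  })
}).promise().catch(console.error);
```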
Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.
For resource types, this includes passing all contracts tests defined for the type.
For modules, this includes determining if the module's model meets all necessary requirements.
For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.
If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing.
To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType
.
Once you've initiated testing on an extension using TestType
, you can pass the returned TypeVersionArn
into DescribeType
to monitor the current test status and test status description for the extension.
An extension must have a test status of PASSED
before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.
Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.
For resource types, this includes passing all contract tests defined for the type.
For modules, this includes determining if the module's model meets all necessary requirements.
For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.
If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing.
To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType.
Once you've initiated testing on an extension using TestType
, you can pass the returned TypeVersionArn
into DescribeType to monitor the current test status and test status description for the extension.
An extension must have a test status of PASSED
before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.
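A sketch of initiating the tests and watching the status through DescribeType with the v2 JavaScript client (the type name is a placeholder):

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

async function runContractTests() {
  // Tests the default version unless VersionId is given.
  const { TypeVersionArn } = await cfn.testType({
    Type: 'RESOURCE',
    TypeName: 'Example::Service::Resource' // placeholder type name
  }).promise();

  let status = 'IN_PROGRESS';
  while (status === 'IN_PROGRESS') {
    await new Promise(resolve => setTimeout(resolve, 10000));
    ({ TypeTestsStatus: status } =
      await cfn.describeType({ Arn: TypeVersionArn }).promise());
  }
  console.log('test status:', status); // must reach PASSED before publishing
}

runContractTests().catch(console.error);
```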
Updates the parameter values for stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region.
You can only update stack instances in Amazon Web Services Regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances
.
During stack set updates, any parameters overridden for a stack instance aren't updated, but retain their overridden value.
You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet
to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet
to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances
.
Updates the parameter values for stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region.
You can only update stack instances in Amazon Web Services Regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances.
During stack set updates, any parameters overridden for a stack instance aren't updated, but retain their overridden value.
You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances
.
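A sketch of overriding one parameter on existing stack instances with the v2 JavaScript client (the stack set name, account ID, and parameter are placeholders):

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

cfn.updateStackInstances({
  StackSetName: 'example-stack-set', // placeholder
  Accounts: ['111122223333'],        // placeholder account ID
  Regions: ['us-east-1', 'us-west-2'],
  ParameterOverrides: [
    { ParameterKey: 'InstanceType', ParameterValue: 't3.small' } // placeholder parameter
  ]
}).promise()
  .then(({ OperationId }) => console.log('operation:', OperationId))
  .catch(console.error);
```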
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
CAPABILITY_IAM
and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM
or CAPABILITY_NAMED_IAM
capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM
.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities
error.
If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM resources in CloudFormation templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include
and AWS::Serverless
transforms, which are macros hosted by CloudFormation.
This capability doesn't apply to creating change sets, and specifying it when creating change sets has no effect.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specify this capability.
For more information about macros, see Using CloudFormation macros to perform custom processing on templates.
Only one of the Capabilities
and ResourceType
parameters can be specified.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
CAPABILITY_IAM
and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM
or CAPABILITY_NAMED_IAM
capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM
.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities
error.
If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM resources in CloudFormation templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
This capability doesn't apply to creating change sets, and specifying it when creating change sets has no effect.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specify this capability.
For more information about macros, see Using CloudFormation macros to perform custom processing on templates.
Only one of the Capabilities
and ResourceType
parameters can be specified.
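For example, acknowledging named IAM resources when creating a change set with the v2 JavaScript client (the stack name and template URL are placeholders):

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

cfn.createChangeSet({
  StackName: 'example-stack',   // placeholder
  ChangeSetName: 'add-iam-role',
  ChangeSetType: 'UPDATE',
  TemplateURL: 'https://example-bucket.s3.amazonaws.com/template.yaml', // placeholder
  Capabilities: ['CAPABILITY_NAMED_IAM'] // required when the template names IAM resources
}).promise()
  .then(({ Id }) => console.log('change set:', Id))
  .catch(console.error);
```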
Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback
parameter to the ExecuteChangeSet
API operation must not be specified. This must be one of these values:
DELETE
- Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType
parameter is set to CREATE
. If the deletion of the stack fails, the status of the stack is DELETE_FAILED
.
DO_NOTHING
- if the stack creation fails, do nothing. This is equivalent to specifying true
for the DisableRollback
parameter to the ExecuteChangeSet
API operation.
ROLLBACK
- if the stack creation fails, roll back the stack. This is equivalent to specifying false
for the DisableRollback
parameter to the ExecuteChangeSet
API operation.
For nested stacks, when the OnStackFailure
parameter is set to DELETE
for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted.
Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback
parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:
DELETE
- Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType
parameter is set to CREATE
. If the deletion of the stack fails, the status of the stack is DELETE_FAILED
.
DO_NOTHING
- if the stack creation fails, do nothing. This is equivalent to specifying true
for the DisableRollback
parameter to the ExecuteChangeSet API operation.
ROLLBACK
- if the stack creation fails, roll back the stack. This is equivalent to specifying false
for the DisableRollback
parameter to the ExecuteChangeSet API operation.
For nested stacks, when the OnStackFailure
parameter is set to DELETE
for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted.
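A sketch of requesting automatic cleanup when an initial creation fails (the stack name and template URL are placeholders):

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

cfn.createChangeSet({
  StackName: 'example-stack',     // placeholder
  ChangeSetName: 'initial-create',
  ChangeSetType: 'CREATE',        // OnStackFailure: DELETE is only valid for CREATE
  TemplateURL: 'https://example-bucket.s3.amazonaws.com/template.yaml', // placeholder
  OnStackFailure: 'DELETE'        // clean up the change set and stack on failure
}).promise().catch(console.error);
```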
A list of Parameter
structures that specify input parameters for the stack. For more information, see the Parameter
data type.
A list of Parameter
structures that specify input parameters for the stack. For more information, see the Parameter data type.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
CAPABILITY_IAM
and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM
or CAPABILITY_NAMED_IAM
capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM
.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities
error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include
and AWS::Serverless
transforms, which are macros hosted by CloudFormation.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.
You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
For more information, see Using CloudFormation macros to perform custom processing on templates.
Only one of the Capabilities
and ResourceType
parameters can be specified.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
CAPABILITY_IAM
and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM
or CAPABILITY_NAMED_IAM
capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM
.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities
error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.
You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
For more information, see Using CloudFormation macros to perform custom processing on templates.
Only one of the Capabilities
and ResourceType
parameters can be specified.
A list of stack set parameters whose values you want to override in the selected stack instances.
Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance operations:
To override the current value for a parameter, include the parameter and specify its value.
To leave an overridden parameter set to its present value, include the parameter and specify UsePreviousValue
as true
. (You can't specify both a value and set UsePreviousValue
to true
.)
To set an overridden parameter back to the value specified in the stack set, specify a parameter list but don't include the parameter in the list.
To leave all parameters set to their present values, don't specify this property at all.
During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.
You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet
to update the stack set template.
A list of stack set parameters whose values you want to override in the selected stack instances.
Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance operations:
To override the current value for a parameter, include the parameter and specify its value.
To leave an overridden parameter set to its present value, include the parameter and specify UsePreviousValue
as true
. (You can't specify both a value and set UsePreviousValue
to true
.)
To set an overridden parameter back to the value specified in the stack set, specify a parameter list but don't include the parameter in the list.
To leave all parameters set to their present values, don't specify this property at all.
During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.
You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template.
" }, "OperationPreferences": { "shape": "StackSetOperationPreferences", @@ -2615,7 +2615,7 @@ }, "Capabilities": { "shape": "Capabilities", - "documentation": "In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.
CAPABILITY_IAM
and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM
or CAPABILITY_NAMED_IAM
capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM
.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities
error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include
and AWS::Serverless
transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.
CAPABILITY_IAM
and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM
or CAPABILITY_NAMED_IAM
capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM
.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities
error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
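A sketch of creating a self-managed stack set that acknowledges both capabilities (names and URLs are placeholders); per the note above, a service-managed stack set would reject a template that references macros:

```js
const AWS = require('aws-sdk');
const cfn = new AWS.CloudFormation({ apiVersion: '2010-05-15' });

cfn.createStackSet({
  StackSetName: 'example-stack-set', // placeholder
  TemplateURL: 'https://example-bucket.s3.amazonaws.com/template.yaml', // placeholder
  PermissionModel: 'SELF_MANAGED',
  Capabilities: ['CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']
}).promise()
  .then(({ StackSetId }) => console.log('stack set:', StackSetId))
  .catch(console.error);
```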
A list of Parameter
structures that describe the input parameters and their values used to create the change set. For more information, see the Parameter
data type.
A list of Parameter
structures that describe the input parameters and their values used to create the change set. For more information, see the Parameter data type.
Determines what action will be taken if stack creation fails. When this parameter is specified, the DisableRollback
parameter to the ExecuteChangeSet
API operation must not be specified. This must be one of these values:
DELETE
- Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType
parameter is set to CREATE
. If the deletion of the stack fails, the status of the stack is DELETE_FAILED
.
DO_NOTHING
- if the stack creation fails, do nothing. This is equivalent to specifying true
for the DisableRollback
parameter to the ExecuteChangeSet
API operation.
ROLLBACK
- if the stack creation fails, roll back the stack. This is equivalent to specifying false
for the DisableRollback
parameter to the ExecuteChangeSet
API operation.
Determines what action will be taken if stack creation fails. When this parameter is specified, the DisableRollback
parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:
DELETE
- Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType
parameter is set to CREATE
. If the deletion of the stack fails, the status of the stack is DELETE_FAILED
.
DO_NOTHING
- if the stack creation fails, do nothing. This is equivalent to specifying true
for the DisableRollback
parameter to the ExecuteChangeSet API operation.
ROLLBACK
- if the stack creation fails, roll back the stack. This is equivalent to specifying false
for the DisableRollback
parameter to the ExecuteChangeSet API operation.
The name of the extension.
If the extension is a public third-party type you have activated with a type name alias, CloudFormation returns the type name alias. For more information, see ActivateType
.
The name of the extension.
If the extension is a public third-party type you have activated with a type name alias, CloudFormation returns the type name alias. For more information, see ActivateType.
" }, "DefaultVersionId": { "shape": "TypeVersionId", - "documentation": "The ID of the default version of the extension. The default version is used when the extension version isn't specified.
This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null
. For more information, see RegisterType
.
To set the default version of an extension, use SetTypeDefaultVersion.
" + "documentation": "The ID of the default version of the extension. The default version is used when the extension version isn't specified.
This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null
. For more information, see RegisterType.
To set the default version of an extension, use SetTypeDefaultVersion.
" }, "IsDefaultVersion": { "shape": "IsDefaultVersion", @@ -3606,7 +3606,7 @@ }, "LoggingConfig": { "shape": "LoggingConfig", - "documentation": "Contains logging configuration information for private extensions. This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null
. For more information, see RegisterType
.
Contains logging configuration information for private extensions. This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null
. For more information, see RegisterType.
The Amazon Resource Name (ARN) of the IAM execution role used to register the extension. This applies only to private extensions you have registered in your account. For more information, see RegisterType
.
If the registered extension calls any Amazon Web Services APIs, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your extension with the appropriate credentials.
" + "documentation": "The Amazon Resource Name (ARN) of the IAM execution role used to register the extension. This applies only to private extensions you have registered in your account. For more information, see RegisterType.
If the registered extension calls any Amazon Web Services APIs, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your extension with the appropriate credentials.
" }, "Visibility": { "shape": "Visibility", @@ -3630,7 +3630,7 @@ }, "LastUpdated": { "shape": "Timestamp", - "documentation": "When the specified extension version was registered. This applies only to:
Private extensions you have registered in your account. For more information, see RegisterType
.
Public extensions you have activated in your account with auto-update specified. For more information, see ActivateType
.
When the specified extension version was registered. This applies only to:
Private extensions you have registered in your account. For more information, see RegisterType.
Public extensions you have activated in your account with auto-update specified. For more information, see ActivateType.
A JSON string that represent the current configuration data for the extension in this account and Region.
To set the configuration data for an extension, use SetTypeConfiguration
. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.
A JSON string that represent the current configuration data for the extension in this account and Region.
To set the configuration data for an extension, use SetTypeConfiguration. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.
" }, "PublisherId": { "shape": "PublisherId", @@ -3892,7 +3892,7 @@ }, "DisableRollback": { "shape": "DisableRollback", - "documentation": "Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure
parameter to the CreateChangeSet
API operation was specified.
True
- if the stack creation fails, do nothing. This is equivalent to specifying DO_NOTHING
for the OnStackFailure
parameter to the CreateChangeSet
API operation.
False
- if the stack creation fails, roll back the stack. This is equivalent to specifying ROLLBACK
for the OnStackFailure
parameter to the CreateChangeSet
API operation.
Default: True
Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure
parameter to the CreateChangeSet API operation was specified.
True
- if the stack creation fails, do nothing. This is equivalent to specifying DO_NOTHING
for the OnStackFailure
parameter to the CreateChangeSet API operation.
False
- if the stack creation fails, roll back the stack. This is equivalent to specifying ROLLBACK
for the OnStackFailure
parameter to the CreateChangeSet API operation.
Default: True
If the request doesn't return all the remaining results, NextToken
is set to a token. To retrieve the next set of results, call ListStackSetAutoDeploymentTargets
again and use that value for the NextToken
parameter. If the request returns all results, NextToken
is set to an empty string.
If the request doesn't return all the remaining results, NextToken
is set to a token. To retrieve the next set of results, call ListStackSetAutoDeploymentTargets again and use that value for the NextToken
parameter. If the request returns all results, NextToken
is set to an empty string.
The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.
The default is 0 minutes.
If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack
, for example) as necessary.
If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.
" + "documentation": "The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.
The default is 0 minutes.
If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack, for example) as necessary.
If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.
" } }, "documentation": "Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.
Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.
" @@ -6233,7 +6233,7 @@ }, "Type": { "shape": "Type", - "documentation": "The resource type of the rollback trigger. Specify either AWS::CloudWatch::Alarm
or AWS::CloudWatch::CompositeAlarm
resource types.
The resource type of the rollback trigger. Specify either AWS::CloudWatch::Alarm or AWS::CloudWatch::CompositeAlarm resource types.
" } }, "documentation": "A rollback trigger CloudFormation monitors during creation and updating of stacks. If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.
" @@ -6339,11 +6339,11 @@ "members": { "TypeArn": { "shape": "TypeArn", - "documentation": "The Amazon Resource Name (ARN) for the extension, in this account and Region.
For public extensions, this will be the ARN assigned when you call the ActivateType
API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType
API operation in this account and Region.
Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version.
" + "documentation": "The Amazon Resource Name (ARN) for the extension, in this account and Region.
For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.
Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version.
" }, "Configuration": { "shape": "TypeConfiguration", - "documentation": "The configuration data for the extension, in this account and Region.
The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema
response element of DescribeType
. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide.
The configuration data for the extension, in this account and Region.
The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema
response element of DescribeType. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide.
[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets
.
[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
" }, "DriftStatus": { "shape": "StackDriftStatus", @@ -6898,7 +6898,7 @@ }, "OrganizationalUnitId": { "shape": "OrganizationalUnitId", - "documentation": "[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets
.
[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
" }, "DriftStatus": { "shape": "StackDriftStatus", @@ -7290,7 +7290,7 @@ }, "OrganizationalUnitIds": { "shape": "OrganizationalUnitIdList", - "documentation": "[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets
.
[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
" }, "ManagedExecution": { "shape": "ManagedExecution", @@ -7324,7 +7324,7 @@ "documentation": "The list of Regions targeted for this organization or OU.
" } }, - "documentation": "One of the targets for the stack set. Returned by the ListStackSetAutoDeploymentTargets
API operation.
One of the targets for the stack set. Returned by the ListStackSetAutoDeploymentTargets API operation.
" }, "StackSetDriftDetectionDetails": { "type": "structure", @@ -7538,7 +7538,7 @@ }, "OrganizationalUnitId": { "shape": "OrganizationalUnitId", - "documentation": "[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets
.
[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
" } }, "documentation": "The structure that contains information about a specified operation's results for a given account in a given Region.
" @@ -7878,11 +7878,11 @@ "members": { "DeletionPolicy": { "shape": "GeneratedTemplateDeletionPolicy", - "documentation": "The DeletionPolicy
assigned to resources in the generated template. Supported values are:
DELETE
- delete all resources when the stack is deleted.
RETAIN
- retain all resources when the stack is deleted.
For more information, see DeletionPolicy
attribute in the CloudFormation User Guide.
The DeletionPolicy
assigned to resources in the generated template. Supported values are:
DELETE
- delete all resources when the stack is deleted.
RETAIN
- retain all resources when the stack is deleted.
For more information, see DeletionPolicy attribute in the CloudFormation User Guide.
" }, "UpdateReplacePolicy": { "shape": "GeneratedTemplateUpdateReplacePolicy", - "documentation": "The UpdateReplacePolicy
assigned to resources in the generated template. Supported values are:
DELETE
- delete all resources when the resource is replaced during an update operation.
RETAIN
- retain all resources when the resource is replaced during an update operation.
For more information, see UpdateReplacePolicy
attribute in the CloudFormation User Guide.
The UpdateReplacePolicy
assigned to resources in the generated template. Supported values are:
DELETE
- delete all resources when the resource is replaced during an update operation.
RETAIN
- retain all resources when the resource is replaced during an update operation.
For more information, see UpdateReplacePolicy attribute in the CloudFormation User Guide.
" } }, "documentation": "The configuration details of a generated template.
" @@ -8134,7 +8134,7 @@ }, "TypeArn": { "shape": "TypeArn", - "documentation": "The Amazon Resource Name (ARN) for the extension, in this account and Region.
For public extensions, this will be the ARN assigned when you call the ActivateType
API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType
API operation in this account and Region.
The Amazon Resource Name (ARN) for the extension, in this account and Region.
For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.
" }, "TypeName": { "shape": "TypeName", @@ -8158,7 +8158,7 @@ "members": { "TypeArn": { "shape": "TypeArn", - "documentation": "The Amazon Resource Name (ARN) for the extension, in this account and Region.
For public extensions, this will be the ARN assigned when you call the ActivateType
API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType
API operation in this account and Region.
The Amazon Resource Name (ARN) for the extension, in this account and Region.
For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.
" }, "TypeConfigurationAlias": { "shape": "TypeConfigurationAlias", @@ -8239,11 +8239,11 @@ }, "TypeName": { "shape": "TypeName", - "documentation": "The name of the extension.
If you specified a TypeNameAlias
when you call the ActivateType
API operation in your account and Region, CloudFormation considers that alias as the type name.
The name of the extension.
If you specified a TypeNameAlias
when you call the ActivateType API operation in your account and Region, CloudFormation considers that alias as the type name.
The ID of the default version of the extension. The default version is used when the extension version isn't specified.
This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon and published by third parties, CloudFormation returns null
. For more information, see RegisterType
.
To set the default version of an extension, use SetTypeDefaultVersion.
" + "documentation": "The ID of the default version of the extension. The default version is used when the extension version isn't specified.
This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon and published by third parties, CloudFormation returns null
. For more information, see RegisterType.
To set the default version of an extension, use SetTypeDefaultVersion.
" }, "TypeArn": { "shape": "TypeArn", @@ -8251,7 +8251,7 @@ }, "LastUpdated": { "shape": "Timestamp", - "documentation": "When the specified extension version was registered. This applies only to:
Private extensions you have registered in your account. For more information, see RegisterType
.
Public extensions you have activated in your account with auto-update specified. For more information, see ActivateType
.
For all other extension types, CloudFormation returns null
.
When the specified extension version was registered. This applies only to:
Private extensions you have registered in your account. For more information, see RegisterType.
Public extensions you have activated in your account with auto-update specified. For more information, see ActivateType.
For all other extension types, CloudFormation returns null
.
A list of Parameter
structures that specify input parameters for the stack. For more information, see the Parameter
data type.
A list of Parameter
structures that specify input parameters for the stack. For more information, see the Parameter data type.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.
CAPABILITY_IAM
and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM
or CAPABILITY_NAMED_IAM
capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM
.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities
error.
If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include
and AWS::Serverless
transforms, which are macros hosted by CloudFormation.
If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.
You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Only one of the Capabilities
and ResourceType
parameters can be specified.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.
CAPABILITY_IAM
and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.
You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Only one of the Capabilities and ResourceType parameters can be specified.
A list of input parameters whose values you want to update for the specified stack instances.
Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance update operations:
To override the current value for a parameter, include the parameter and specify its value.
To leave an overridden parameter set to its present value, include the parameter and specify UsePreviousValue as true. (You can't specify both a value and set UsePreviousValue to true.)
To set an overridden parameter back to the value specified in the stack set, specify a parameter list but don't include the parameter in the list.
To leave all parameters set to their present values, don't specify this property at all.
During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.
You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.
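A hedged sketch (not part of this diff) of how these override rules look through the SDK's UpdateStackInstances call; the stack set name, account, and parameter keys are placeholders:

```js
// Overriding one parameter while keeping another override at its current value.
var AWS = require('aws-sdk');
var cloudformation = new AWS.CloudFormation({ region: 'us-east-1' });

cloudformation.updateStackInstances({
  StackSetName: 'my-stack-set',          // placeholder
  Accounts: ['123456789012'],            // placeholder account
  Regions: ['us-east-1'],
  ParameterOverrides: [
    { ParameterKey: 'InstanceType', ParameterValue: 't3.small' }, // override this value
    { ParameterKey: 'KeyName', UsePreviousValue: true }           // keep the current override
  ]
}, function (err, data) {
  if (err) console.error(err);
  else console.log(data.OperationId);
});
```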
A list of input parameters whose values you want to update for the specified stack instances.
Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance update operations:
To override the current value for a parameter, include the parameter and specify its value.
To leave an overridden parameter set to its present value, include the parameter and specify UsePreviousValue as true. (You can't specify both a value and set UsePreviousValue to true.)
To set an overridden parameter back to the value specified in the stack set, specify a parameter list but don't include the parameter in the list.
To leave all parameters set to their present values, don't specify this property at all.
During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.
You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
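For stack sets, the same acknowledgement is passed to UpdateStackSet. A minimal, hedged sketch (not part of this diff), with a placeholder stack set name:

```js
// Acknowledging capabilities when updating a stack set.
var AWS = require('aws-sdk');
var cloudformation = new AWS.CloudFormation({ region: 'us-east-1' });

cloudformation.updateStackSet({
  StackSetName: 'my-stack-set',           // placeholder
  UsePreviousTemplate: true,
  Capabilities: ['CAPABILITY_NAMED_IAM']  // add CAPABILITY_AUTO_EXPAND for macros
}, function (err, data) {
  if (err) console.error(err);
  else console.log(data.OperationId);
});
```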
Describes the lock status for a snapshot.
" }, + "DescribeMacHosts": { + "name": "DescribeMacHosts", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "DescribeMacHostsRequest" + }, + "output": { + "shape": "DescribeMacHostsResult" + }, + "documentation": "Describes the specified EC2 Mac Dedicated Host or all of your EC2 Mac Dedicated Hosts.
" + }, "DescribeManagedPrefixLists": { "name": "DescribeManagedPrefixLists", "http": { @@ -24692,6 +24706,49 @@ } } }, + "DescribeMacHostsRequest": { + "type": "structure", + "members": { + "Filters": { + "shape": "FilterList", + "documentation": "The filters.
availability-zone - The Availability Zone of the EC2 Mac Dedicated Host.
instance-type - The instance type size that the EC2 Mac Dedicated Host is configured to support.
The IDs of the EC2 Mac Dedicated Hosts.
", + "locationName": "HostId" + }, + "MaxResults": { + "shape": "DescribeMacHostsRequestMaxResults", + "documentation": "The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken
value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.
The token to use to retrieve the next page of results.
" + } + } + }, + "DescribeMacHostsRequestMaxResults": { + "type": "integer", + "max": 500, + "min": 5 + }, + "DescribeMacHostsResult": { + "type": "structure", + "members": { + "MacHosts": { + "shape": "MacHostList", + "documentation": "Information about the EC2 Mac Dedicated Hosts.
", + "locationName": "macHostSet" + }, + "NextToken": { + "shape": "String", + "documentation": "The token to use to retrieve the next page of results.
", + "locationName": "nextToken" + } + } + }, "DescribeManagedPrefixListsRequest": { "type": "structure", "members": { @@ -44252,6 +44309,36 @@ "Long": { "type": "long" }, + "MacHost": { + "type": "structure", + "members": { + "HostId": { + "shape": "DedicatedHostId", + "documentation": "The EC2 Mac Dedicated Host ID.
", + "locationName": "hostId" + }, + "MacOSLatestSupportedVersions": { + "shape": "MacOSVersionStringList", + "documentation": "The latest macOS versions that the EC2 Mac Dedicated Host can launch without being upgraded.
", + "locationName": "macOSLatestSupportedVersionSet" + } + }, + "documentation": "Information about the EC2 Mac Dedicated Host.
" + }, + "MacHostList": { + "type": "list", + "member": { + "shape": "MacHost", + "locationName": "item" + } + }, + "MacOSVersionStringList": { + "type": "list", + "member": { + "shape": "String", + "locationName": "item" + } + }, "MaintenanceDetails": { "type": "structure", "members": { diff --git a/apis/ec2-2016-11-15.paginators.json b/apis/ec2-2016-11-15.paginators.json index d0520c22a8..ea572e61e8 100644 --- a/apis/ec2-2016-11-15.paginators.json +++ b/apis/ec2-2016-11-15.paginators.json @@ -342,6 +342,12 @@ "output_token": "NextToken", "result_key": "LocalGateways" }, + "DescribeMacHosts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MacHosts" + }, "DescribeManagedPrefixLists": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/apis/finspace-2021-03-12.min.json b/apis/finspace-2021-03-12.min.json index b5feb30aa6..4c60476ef0 100644 --- a/apis/finspace-2021-03-12.min.json +++ b/apis/finspace-2021-03-12.min.json @@ -148,29 +148,29 @@ "shape": "S1d" }, "cacheStorageConfigurations": { - "shape": "S1q" + "shape": "S1r" }, "autoScalingConfiguration": { - "shape": "S1t" + "shape": "S1u" }, "clusterDescription": {}, "capacityConfiguration": { - "shape": "S1z" + "shape": "S20" }, "releaseLabel": {}, "vpcConfiguration": { - "shape": "S22" + "shape": "S23" }, "initializationScript": {}, "commandLineArguments": { - "shape": "S2a" + "shape": "S2b" }, "code": { - "shape": "S2e" + "shape": "S2f" }, "executionRole": {}, "savedownStorageConfiguration": { - "shape": "S2j" + "shape": "S2k" }, "azMode": {}, "availabilityZoneId": {}, @@ -178,7 +178,7 @@ "shape": "S5" }, "scalingGroupConfiguration": { - "shape": "S2o" + "shape": "S2p" } } }, @@ -194,38 +194,38 @@ "shape": "S1a" }, "volumes": { - "shape": "S2w" + "shape": "S2x" }, "databases": { "shape": "S1d" }, "cacheStorageConfigurations": { - "shape": "S1q" + "shape": "S1r" }, "autoScalingConfiguration": { - "shape": "S1t" + "shape": "S1u" }, "clusterDescription": {}, "capacityConfiguration": { - "shape": "S1z" + "shape": "S20" }, "releaseLabel": {}, "vpcConfiguration": { - "shape": "S22" + "shape": "S23" }, "initializationScript": {}, "commandLineArguments": { - "shape": "S2a" + "shape": "S2b" }, "code": { - "shape": "S2e" + "shape": "S2f" }, "executionRole": {}, "lastModifiedTimestamp": { "type": "timestamp" }, "savedownStorageConfiguration": { - "shape": "S2j" + "shape": "S2k" }, "azMode": {}, "availabilityZoneId": {}, @@ -233,7 +233,7 @@ "type": "timestamp" }, "scalingGroupConfiguration": { - "shape": "S2o" + "shape": "S2p" } } } @@ -312,6 +312,9 @@ "autoUpdate": { "type": "boolean" }, + "readWrite": { + "type": "boolean" + }, "description": {}, "tags": { "shape": "S5" @@ -337,6 +340,9 @@ "autoUpdate": { "type": "boolean" }, + "readWrite": { + "type": "boolean" + }, "createdTimestamp": { "type": "timestamp" }, @@ -888,38 +894,38 @@ "shape": "S1a" }, "volumes": { - "shape": "S2w" + "shape": "S2x" }, "databases": { "shape": "S1d" }, "cacheStorageConfigurations": { - "shape": "S1q" + "shape": "S1r" }, "autoScalingConfiguration": { - "shape": "S1t" + "shape": "S1u" }, "clusterDescription": {}, "capacityConfiguration": { - "shape": "S1z" + "shape": "S20" }, "releaseLabel": {}, "vpcConfiguration": { - "shape": "S22" + "shape": "S23" }, "initializationScript": {}, "commandLineArguments": { - "shape": "S2a" + "shape": "S2b" }, "code": { - "shape": "S2e" + "shape": "S2f" }, "executionRole": {}, "lastModifiedTimestamp": { "type": "timestamp" }, "savedownStorageConfiguration": { 
- "shape": "S2j" + "shape": "S2k" }, "azMode": {}, "availabilityZoneId": {}, @@ -927,7 +933,7 @@ "type": "timestamp" }, "scalingGroupConfiguration": { - "shape": "S2o" + "shape": "S2p" } } } @@ -1062,6 +1068,9 @@ "autoUpdate": { "type": "boolean" }, + "readWrite": { + "type": "boolean" + }, "environmentId": {}, "createdTimestamp": { "type": "timestamp" @@ -1437,7 +1446,7 @@ "clusterDescription": {}, "releaseLabel": {}, "volumes": { - "shape": "S2w" + "shape": "S2x" }, "initializationScript": {}, "executionRole": {}, @@ -1560,6 +1569,9 @@ "autoUpdate": { "type": "boolean" }, + "readWrite": { + "type": "boolean" + }, "createdTimestamp": { "type": "timestamp" }, @@ -1939,11 +1951,11 @@ "idempotencyToken": true }, "code": { - "shape": "S2e" + "shape": "S2f" }, "initializationScript": {}, "commandLineArguments": { - "shape": "S2a" + "shape": "S2b" }, "deploymentConfiguration": { "type": "structure", @@ -2098,6 +2110,9 @@ "autoUpdate": { "type": "boolean" }, + "readWrite": { + "type": "boolean" + }, "description": {}, "createdTimestamp": { "type": "timestamp" @@ -2424,11 +2439,14 @@ "type": "list", "member": {} }, - "volumeName": {} + "volumeName": {}, + "onDemand": { + "type": "boolean" + } } } }, - "S1q": { + "S1r": { "type": "list", "member": { "type": "structure", @@ -2444,7 +2462,7 @@ } } }, - "S1t": { + "S1u": { "type": "structure", "members": { "minNodeCount": { @@ -2465,7 +2483,7 @@ } } }, - "S1z": { + "S20": { "type": "structure", "members": { "nodeType": {}, @@ -2474,7 +2492,7 @@ } } }, - "S22": { + "S23": { "type": "structure", "members": { "vpcId": {}, @@ -2489,7 +2507,7 @@ "ipAddressType": {} } }, - "S2a": { + "S2b": { "type": "list", "member": { "type": "structure", @@ -2499,7 +2517,7 @@ } } }, - "S2e": { + "S2f": { "type": "structure", "members": { "s3Bucket": {}, @@ -2507,7 +2525,7 @@ "s3ObjectVersion": {} } }, - "S2j": { + "S2k": { "type": "structure", "members": { "type": {}, @@ -2517,7 +2535,7 @@ "volumeName": {} } }, - "S2o": { + "S2p": { "type": "structure", "required": [ "scalingGroupName", @@ -2540,7 +2558,7 @@ } } }, - "S2w": { + "S2x": { "type": "list", "member": { "type": "structure", diff --git a/apis/finspace-2021-03-12.normal.json b/apis/finspace-2021-03-12.normal.json index 79ae74f120..560f8f3b96 100644 --- a/apis/finspace-2021-03-12.normal.json +++ b/apis/finspace-2021-03-12.normal.json @@ -1748,7 +1748,10 @@ "min": 1 }, "AvailabilityZoneId": { - "type": "string" + "type": "string", + "max": 12, + "min": 8, + "pattern": "^[a-zA-Z0-9-]+$" }, "AvailabilityZoneIds": { "type": "list", @@ -1814,7 +1817,8 @@ "ChangesetId": { "type": "string", "max": 26, - "min": 1 + "min": 1, + "pattern": "^[a-zA-Z0-9]+$" }, "ChangesetStatus": { "type": "string", @@ -2284,7 +2288,7 @@ }, "azMode": { "shape": "KxAzMode", - "documentation": "The number of availability zones you want to assign per cluster. This can be one of the following
SINGLE – Assigns one availability zone per cluster.
MULTI – Assigns all the availability zones per cluster.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false.
" }, + "readWrite": { + "shape": "booleanValue", + "documentation": "The option to specify whether you want to make the dataview writable to perform database maintenance. The following are some considerations related to writable dataviews.
You cannot create partial writable dataviews. When you create writeable dataviews you must provide the entire database path.
You cannot perform updates on a writeable dataview. Hence, autoUpdate must be set to False if readWrite is True for a dataview.
You must also use a unique volume for creating a writeable dataview. So, if you choose a volume that is already in use by another dataview, the dataview creation fails.
Once you create a dataview as writeable, you cannot change it to read-only. So, you cannot update the readWrite parameter later.
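A hedged sketch (not part of this diff) of creating a writable dataview under the constraints listed above; every identifier is a placeholder:

```js
// Creating a dataview with the new readWrite flag for database maintenance.
var AWS = require('aws-sdk');
var finspace = new AWS.Finspace({ region: 'us-east-1' });

finspace.createKxDataview({
  environmentId: 'my-environment-id',  // placeholder
  databaseName: 'welcomedb',           // placeholder
  dataviewName: 'maintenance-view',    // placeholder
  azMode: 'SINGLE',
  availabilityZoneId: 'use1-az1',      // placeholder
  autoUpdate: false,                   // required: writable dataviews can't auto-update
  readWrite: true,
  clientToken: 'unique-token-123'      // placeholder idempotency token
}, function (err, data) {
  if (err) console.error(err);
  else console.log(data.status);
});
```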
A description of the dataview.
" @@ -2334,7 +2342,7 @@ }, "azMode": { "shape": "KxAzMode", - "documentation": "The number of availability zones you want to assign per cluster. This can be one of the following
SINGLE – Assigns one availability zone per cluster.
MULTI – Assigns all the availability zones per cluster.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The option to select whether you want to apply all the future additions and corrections automatically to the dataview when you ingest new changesets. The default value is false.
" }, + "readWrite": { + "shape": "booleanValue", + "documentation": "Returns True if the dataview is created as writeable and False otherwise.
" + }, "createdTimestamp": { "shape": "Timestamp", "documentation": "The timestamp at which the dataview was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
" @@ -2460,7 +2472,7 @@ }, "hostType": { "shape": "KxHostType", - "documentation": "The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
" + "documentation": "The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
You can add one of the following values:
kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 vCPUs.
kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 vCPUs.
kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 vCPUs.
kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and 128 vCPUs.
kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and 64 vCPUs.
kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and 96 vCPUs.
The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE
for volumes.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE
for volumes.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The number of availability zones you want to assign per cluster. This can be one of the following
SINGLE – Assigns one availability zone per cluster.
MULTI – Assigns all the availability zones per cluster.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when new changesets are ingested. The default value is false.
" }, + "readWrite": { + "shape": "booleanValue", + "documentation": "Returns True if the dataview is created as writeable and False otherwise.
" + }, "environmentId": { "shape": "EnvironmentId", "documentation": "A unique identifier for the kdb environment, from where you want to retrieve the dataview details.
" @@ -3694,7 +3710,7 @@ }, "hostType": { "shape": "KxHostType", - "documentation": "The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
" + "documentation": "The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
It can have one of the following values:
kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 vCPUs.
kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 vCPUs.
kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 vCPUs.
kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and 128 vCPUs.
kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and 64 vCPUs.
kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and 96 vCPUs.
The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE
for volumes.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The number of availability zones you want to assign per cluster. This can be one of the following
SINGLE – Assigns one availability zone per cluster.
MULTI – Assigns all the availability zones per cluster.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when you ingest new changesets. The default value is false.
" }, + "readWrite": { + "shape": "booleanValue", + "documentation": "Returns True if the dataview is created as writeable and False otherwise.
" + }, "createdTimestamp": { "shape": "Timestamp", "documentation": "The timestamp at which the dataview list entry was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
" @@ -4380,6 +4400,10 @@ "volumeName": { "shape": "KxVolumeName", "documentation": "The name of the volume where you want to add data.
" + }, + "onDemand": { + "shape": "booleanValue", + "documentation": "Enables on-demand caching on the selected database path when a particular file or a column of the database is accessed. When on demand caching is True, dataviews perform minimal loading of files on the filesystem as needed. When it is set to False, everything is cached. The default value is False.
" } }, "documentation": "The configuration that contains the database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. If you do not explicitly specify any database path for a volume, they are accessible from the cluster through the default S3/object store segment.
" @@ -4548,7 +4572,6 @@ }, "KxNAS1Size": { "type": "integer", - "max": 33600, "min": 1200 }, "KxNAS1Type": { @@ -4621,7 +4644,7 @@ }, "hostType": { "shape": "KxHostType", - "documentation": "The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
" + "documentation": "The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
You can add one of the following values:
kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 vCPUs.
kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 vCPUs.
kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 vCPUs.
kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and 128 vCPUs.
kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and 64 vCPUs.
kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and 96 vCPUs.
The number of availability zones assigned to the volume. Currently, only SINGLE
is supported.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The number of availability zones you want to assign per cluster. This can be one of the following
SINGLE – Assigns one availability zone per cluster.
MULTI – Assigns all the availability zones per cluster.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when new changesets are ingested. The default value is false.
" }, + "readWrite": { + "shape": "booleanValue", + "documentation": "Returns True if the dataview is created as writeable and False otherwise.
" + }, "description": { "shape": "Description", "documentation": "A description of the dataview.
" @@ -6212,7 +6239,7 @@ }, "azMode": { "shape": "KxAzMode", - "documentation": "The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE
for volumes.
The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE
for volumes. This places dataview in a single AZ.
Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created.
Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
You can't update an existing delivery. You can only create and delete deliveries.
" + "documentation": "Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created.
Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
You can't update an existing delivery. You can only create and delete deliveries.
" }, "CreateExportTask": { "name": "CreateExportTask", @@ -661,7 +661,7 @@ "shape": "ThrottlingException" } ], - "documentation": "Retrieves a list of the deliveries that have been created in the account.
A delivery is a connection between a delivery source and a delivery destination.
A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
" + "documentation": "Retrieves a list of the deliveries that have been created in the account.
A delivery is a connection between a delivery source and a delivery destination.
A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
" }, "DescribeDeliveryDestinations": { "name": "DescribeDeliveryDestinations", @@ -1036,7 +1036,7 @@ "shape": "ThrottlingException" } ], - "documentation": "Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination .
A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination .
A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
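A hedged sketch (not part of this diff) pairing DescribeDeliveries with GetDelivery; no parameters beyond the delivery id are assumed:

```js
// List all deliveries, then fetch each one by id.
var AWS = require('aws-sdk');
var logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

logs.describeDeliveries({}, function (err, data) {
  if (err) return console.error(err);
  data.deliveries.forEach(function (d) {
    logs.getDelivery({ id: d.id }, function (err, res) {
      if (!err) console.log(res.delivery.deliverySourceName, '->',
                            res.delivery.deliveryDestinationArn);
    });
  });
});
```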
Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account.
Data protection policy
A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.
The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
Subscription filter policy
A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
A Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account.
Data protection policy
A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.
The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
Subscription filter policy
A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
A Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Kinesis Data Firehose are supported as logs delivery destinations.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
" + "documentation": "Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
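A hedged sketch (not part of this diff) of the three-step wiring described above; the source ARN and log type are placeholders that depend on which service is sending logs:

```js
// 1. delivery source, 2. delivery destination, 3. pair them with a delivery.
var AWS = require('aws-sdk');
var logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

logs.putDeliverySource({
  name: 'my-delivery-source',
  resourceArn: 'arn:aws:someservice:us-east-1:123456789012:resource/example', // placeholder
  logType: 'EVENT_LOGS'                                                       // placeholder log type
}, function (err) {
  if (err) return console.error(err);
  logs.putDeliveryDestination({
    name: 'my-delivery-destination',
    deliveryDestinationConfiguration: {
      destinationResourceArn: 'arn:aws:s3:::amzn-s3-demo-bucket' // placeholder S3 bucket
    }
  }, function (err, dest) {
    if (err) return console.error(err);
    logs.createDelivery({
      deliverySourceName: 'my-delivery-source',
      deliveryDestinationArn: dest.deliveryDestination.arn
    }, function (err, res) {
      if (err) console.error(err);
      else console.log(res.delivery.id);
    });
  });
});
```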
" }, "PutDeliveryDestinationPolicy": { "name": "PutDeliveryDestinationPolicy", @@ -1517,7 +1517,7 @@ "shape": "ThrottlingException" } ], - "documentation": "Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:
Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs.
Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
" + "documentation": "Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.
To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:
Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs.
Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
" }, "PutDestination": { "name": "PutDestination", @@ -1732,7 +1732,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Kinesis Data Firehose as logical destinations.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.
To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.
To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
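A hedged sketch (not part of this diff) of a same-account Kinesis subscription filter; all names, ARNs, and the filter pattern are placeholders:

```js
// Subscribe a Kinesis data stream to a log group's ERROR-level events.
var AWS = require('aws-sdk');
var logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

logs.putSubscriptionFilter({
  logGroupName: 'my-log-group',                                               // placeholder
  filterName: 'my-filter',                                                    // placeholder
  filterPattern: '{ $.level = "ERROR" }',                                     // placeholder pattern
  destinationArn: 'arn:aws:kinesis:us-east-1:123456789012:stream/my-stream',  // placeholder
  roleArn: 'arn:aws:iam::123456789012:role/CWLtoKinesisRole'                  // placeholder
}, function (err) {
  if (err) console.error(err);
});
```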
Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
" + "documentation": "Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Firehose.
" }, "tags": { "shape": "Tags", @@ -2716,7 +2716,7 @@ }, "deliveryDestinationType": { "shape": "DeliveryDestinationType", - "documentation": "Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
" + "documentation": "Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Firehose.
" }, "outputFormat": { "shape": "OutputFormat", @@ -2731,7 +2731,7 @@ "documentation": "The tags that have been assigned to this delivery destination.
" } }, - "documentation": "This structure contains information about one delivery destination in your account. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, are supported as Kinesis Data Firehose delivery destinations.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Create a delivery destination, which is a logical object that represents the actual delivery destination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
" + "documentation": "This structure contains information about one delivery destination in your account. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, are supported as Firehose delivery destinations.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Create a delivery destination, which is a logical object that represents the actual delivery destination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
" }, "DeliveryDestinationConfiguration": { "type": "structure", @@ -2741,7 +2741,7 @@ "members": { "destinationResourceArn": { "shape": "Arn", - "documentation": "The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.
" + "documentation": "The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
" } }, "documentation": "A structure that contains information about one logs delivery destination.
" @@ -2805,7 +2805,7 @@ "documentation": "The tags that have been assigned to this delivery source.
" } }, - "documentation": "This structure contains information about one delivery source in your account. A delivery source is an Amazon Web Services resource that sends logs to an Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
" + "documentation": "This structure contains information about one delivery source in your account. A delivery source is an Amazon Web Services resource that sends logs to an Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
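The four numbered steps above map directly onto SDK calls. A minimal sketch of the same-account case with the AWS SDK for JavaScript v2; all names and ARNs are placeholders, and error handling is omitted:

```javascript
// Sketch of the delivery wiring described above (same-account case).
var AWS = require('aws-sdk');
var logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

async function wireDelivery() {
  // Step 1: a delivery source representing the resource that emits the logs.
  var src = await logs.putDeliverySource({
    name: 'example-source',
    resourceArn: 'arn:aws:codewhisperer:us-east-1:123456789012:profile/example', // placeholder
    logType: 'EVENT_LOGS'
  }).promise();

  // Step 2: a delivery destination representing the receiving resource
  // (a CloudWatch Logs log group, an S3 bucket, or a Firehose delivery stream).
  var dst = await logs.putDeliveryDestination({
    name: 'example-destination',
    deliveryDestinationConfiguration: {
      destinationResourceArn: 'arn:aws:logs:us-east-1:123456789012:log-group:example-group' // placeholder
    }
  }).promise();

  // Step 3 (cross-account only): PutDeliveryDestinationPolicy would be called
  // in the destination account here; omitted in this same-account sketch.

  // Step 4: pair exactly one delivery source with one delivery destination.
  return logs.createDelivery({
    deliverySourceName: src.deliverySource.name,
    deliveryDestinationArn: dst.deliveryDestination.arn
  }).promise();
}
```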
" }, "DeliverySourceName": { "type": "string", @@ -4208,8 +4208,18 @@ "event": true }, "LogEvent": { - "type": "string", - "min": 1 + "type": "structure", + "members": { + "timestamp": { + "shape": "Timestamp", + "documentation": "The time stamp of the log event.
" + }, + "message": { + "shape": "EventMessage", + "documentation": "The message content of the log event.
" + } + }, + "documentation": "This structure contains the information for one sample log event that is associated with an anomaly found by a log anomaly detector.
" }, "LogEventIndex": { "type": "integer" @@ -4669,7 +4679,7 @@ }, "policyDocument": { "shape": "AccountPolicyDocument", - "documentation": "Specify the policy, in JSON.
Data protection policy
A data protection policy must include two JSON blocks:
The first block must include both a DataIdentifer
array and an Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.
The Operation
property with an Audit
action is required to find the sensitive data terms. This Audit
action must contain a FindingsDestination
object. You can optionally use that FindingsDestination
object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an Operation
property with an Deidentify
action. The DataIdentifer
array must exactly match the DataIdentifer
array in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the data, and it must contain the \"MaskConfig\": {}
object. The \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
The contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
, Description
, and Version
fields. The Name
is different than the operation's policyName
parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters long.
Subscription filter policy
A subscription filter policy can include the following attributes in a JSON block:
DestinationArn The ARN of the destination to deliver log events to. Supported destinations are:
An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.
FilterPattern A filter pattern for subscribing to a filtered stream of log events.
DistributionThe method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random
for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.
Specify the policy, in JSON.
Data protection policy
A data protection policy must include two JSON blocks:
The first block must include both a DataIdentifier
array and an Operation
property with an Audit
action. The DataIdentifier
array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.
The Operation
property with an Audit
action is required to find the sensitive data terms. This Audit
action must contain a FindingsDestination
object. You can optionally use that FindingsDestination
object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifier
array and an Operation
property with a Deidentify
action. The DataIdentifier
array must exactly match the DataIdentifier
array in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the data, and it must contain the \"MaskConfig\": {}
object. The \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
The contents of the two DataIdentifier
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
, Description
, and Version
fields. The Name
is different than the operation's policyName
parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters long.
Subscription filter policy
A subscription filter policy can include the following attributes in a JSON block:
DestinationArn The ARN of the destination to deliver log events to. Supported destinations are:
A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
A Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.
FilterPattern A filter pattern for subscribing to a filtered stream of log events.
Distribution The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random
for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.
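A hedged sketch of putting a subscription filter policy at the account level, built from the attributes just listed; the ARNs are placeholders and the policyType value is an assumption about the PutAccountPolicy API:

```javascript
// Sketch: an account-level subscription filter policy (JavaScript SDK v2).
var AWS = require('aws-sdk');
var logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

var subscriptionFilterPolicy = {
  DestinationArn: 'arn:aws:kinesis:us-east-1:123456789012:stream/example-stream', // placeholder
  RoleArn: 'arn:aws:iam::123456789012:role/CWLtoKinesisRole',                     // placeholder
  FilterPattern: '',     // an empty pattern matches all log events
  Distribution: 'Random' // only applies to Kinesis Data Streams destinations
};

logs.putAccountPolicy({
  policyName: 'example-subscription-policy',
  policyType: 'SUBSCRIPTION_FILTER_POLICY', // assumed enum value
  policyDocument: JSON.stringify(subscriptionFilterPolicy)
}).promise();
```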
Specify the data protection policy, in JSON.
This policy must include two JSON blocks:
The first block must include both a DataIdentifer
array and an Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.
The Operation
property with an Audit
action is required to find the sensitive data terms. This Audit
action must contain a FindingsDestination
object. You can optionally use that FindingsDestination
object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an Operation
property with an Deidentify
action. The DataIdentifer
array must exactly match the DataIdentifer
array in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the data, and it must contain the \"MaskConfig\": {}
object. The \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
The contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
, Description
, and Version
fields. The Name
is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters.
Specify the data protection policy, in JSON.
This policy must include two JSON blocks:
The first block must include both a DataIdentifier
array and an Operation
property with an Audit
action. The DataIdentifier
array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.
The Operation
property with an Audit
action is required to find the sensitive data terms. This Audit
action must contain a FindingsDestination
object. You can optionally use that FindingsDestination
object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifier
array and an Operation
property with a Deidentify
action. The DataIdentifier
array must exactly match the DataIdentifier
array in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the data, and it must contain the \"MaskConfig\": {}
object. The \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
The contents of the two DataIdentifier
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
, Description
, and Version
fields. The Name
is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters.
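For concreteness, a hedged sketch of a policy document with the two required blocks. The statement layout and the managed data-identifier ARN follow the CloudWatch Logs data protection examples and should be treated as assumptions, not a verbatim schema:

```javascript
// Sketch: a data protection policy with matching DataIdentifier arrays,
// an Audit block (finds sensitive terms) and a Deidentify block (masks them).
var policyDocument = {
  Name: 'example-data-protection-policy', // reported as a CloudWatch metric dimension
  Version: '2021-06-01',                  // assumed policy language version
  Statement: [
    {
      Sid: 'audit',
      DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
      Operation: { Audit: { FindingsDestination: {} } } // no findings destinations configured
    },
    {
      Sid: 'redact',
      DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
      Operation: { Deidentify: { MaskConfig: {} } }     // MaskConfig must stay empty
    }
  ]
};

// Attach it to a single log group via PutDataProtectionPolicy, as documented above.
var AWS = require('aws-sdk');
new AWS.CloudWatchLogs({ region: 'us-east-1' }).putDataProtectionPolicy({
  logGroupIdentifier: 'example-log-group', // placeholder
  policyDocument: JSON.stringify(policyDocument)
}).promise();
```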
Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is EVENT_LOGS
.
Defines the type of log that the source is sending.
For Amazon CodeWhisperer, the valid value is EVENT_LOGS
.
For IAM Identity Center, the valid value is ERROR_LOGS
.
For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS
, AUTHENTICATION_LOGS
, WORKMAIL_AVAILABILITY_PROVIDER_LOGS
, and WORKMAIL_MAILBOX_ACCESS_LOGS
.
The log events that are too new.
" + "documentation": "The index of the first log event that is too new. This field is inclusive.
" }, "tooOldLogEventEndIndex": { "shape": "LogEventIndex", - "documentation": "The log events that are dated too far in the past.
" + "documentation": "The index of the last log event that is too old. This field is exclusive.
" }, "expiredLogEventEndIndex": { "shape": "LogEventIndex", diff --git a/apis/managedblockchain-query-2023-05-04.min.json b/apis/managedblockchain-query-2023-05-04.min.json index 5e38d4e5f2..5f3679addf 100644 --- a/apis/managedblockchain-query-2023-05-04.min.json +++ b/apis/managedblockchain-query-2023-05-04.min.json @@ -316,6 +316,82 @@ } } }, + "ListFilteredTransactionEvents": { + "http": { + "requestUri": "/list-filtered-transaction-events", + "responseCode": 200 + }, + "input": { + "type": "structure", + "required": [ + "network", + "addressIdentifierFilter" + ], + "members": { + "network": {}, + "addressIdentifierFilter": { + "type": "structure", + "required": [ + "transactionEventToAddress" + ], + "members": { + "transactionEventToAddress": { + "type": "list", + "member": {} + } + } + }, + "timeFilter": { + "type": "structure", + "members": { + "from": { + "shape": "S9" + }, + "to": { + "shape": "S9" + } + } + }, + "voutFilter": { + "type": "structure", + "required": [ + "voutSpent" + ], + "members": { + "voutSpent": { + "type": "boolean" + } + } + }, + "confirmationStatusFilter": { + "shape": "S1b" + }, + "sort": { + "type": "structure", + "members": { + "sortBy": {}, + "sortOrder": {} + } + }, + "nextToken": {}, + "maxResults": { + "type": "integer" + } + } + }, + "output": { + "type": "structure", + "required": [ + "events" + ], + "members": { + "events": { + "shape": "S1i" + }, + "nextToken": {} + } + } + }, "ListTokenBalances": { "http": { "requestUri": "/list-token-balances", @@ -396,11 +472,11 @@ "input": { "type": "structure", "required": [ - "transactionHash", "network" ], "members": { "transactionHash": {}, + "transactionId": {}, "network": {}, "nextToken": {}, "maxResults": { @@ -415,29 +491,7 @@ ], "members": { "events": { - "type": "list", - "member": { - "type": "structure", - "required": [ - "network", - "transactionHash", - "eventType" - ], - "members": { - "network": {}, - "transactionHash": {}, - "eventType": {}, - "from": {}, - "to": {}, - "value": {}, - "contractAddress": {}, - "tokenId": {}, - "transactionId": {}, - "voutIndex": { - "type": "integer" - } - } - } + "shape": "S1i" }, "nextToken": {} } @@ -475,16 +529,7 @@ "type": "integer" }, "confirmationStatusFilter": { - "type": "structure", - "required": [ - "include" - ], - "members": { - "include": { - "type": "list", - "member": {} - } - } + "shape": "S1b" } } }, @@ -557,6 +602,55 @@ "network": {}, "contractAddress": {} } + }, + "S1b": { + "type": "structure", + "required": [ + "include" + ], + "members": { + "include": { + "type": "list", + "member": {} + } + } + }, + "S1i": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "network", + "transactionHash", + "eventType" + ], + "members": { + "network": {}, + "transactionHash": {}, + "eventType": {}, + "from": {}, + "to": {}, + "value": {}, + "contractAddress": {}, + "tokenId": {}, + "transactionId": {}, + "voutIndex": { + "type": "integer" + }, + "voutSpent": { + "type": "boolean" + }, + "spentVoutTransactionId": {}, + "spentVoutTransactionHash": {}, + "spentVoutIndex": { + "type": "integer" + }, + "blockchainInstant": { + "shape": "S9" + }, + "confirmationStatus": {} + } + } } } } \ No newline at end of file diff --git a/apis/managedblockchain-query-2023-05-04.normal.json b/apis/managedblockchain-query-2023-05-04.normal.json index 0f9b510843..ff38af88ea 100644 --- a/apis/managedblockchain-query-2023-05-04.normal.json +++ b/apis/managedblockchain-query-2023-05-04.normal.json @@ -185,6 +185,38 @@ ], "documentation": "Lists all the 
contracts for a given contract type deployed by an address (either a contract address or a wallet address).
The Bitcoin blockchain networks do not support this operation.
" }, + "ListFilteredTransactionEvents": { + "name": "ListFilteredTransactionEvents", + "http": { + "method": "POST", + "requestUri": "/list-filtered-transaction-events", + "responseCode": 200 + }, + "input": { + "shape": "ListFilteredTransactionEventsInput" + }, + "output": { + "shape": "ListFilteredTransactionEventsOutput" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ServiceQuotaExceededException" + } + ], + "documentation": "Lists all the transaction events for an address on the blockchain.
This operation is only supported on the Bitcoin networks.
An array of TransactionEvent
objects. Each object contains details about the transaction event.
This action will return transaction details for all transactions that are confirmed on the blockchain, even if they have not reached finality.
Lists all the transaction events for a transaction
This action will return transaction details for all transactions that are confirmed on the blockchain, even if they have not reached finality.
Lists all of the transactions on a given wallet address or to a specific contract.
" + "documentation": "Lists all the transaction events for a transaction.
" } }, "shapes": { + "AddressIdentifierFilter": { + "type": "structure", + "required": [ + "transactionEventToAddress" + ], + "members": { + "transactionEventToAddress": { + "shape": "AddressIdentifierFilterTransactionEventToAddressList", + "documentation": "The container for the recipient address of the transaction.
" + } + }, + "documentation": "This is the container for the unique public address on the blockchain.
" + }, + "AddressIdentifierFilterTransactionEventToAddressList": { + "type": "list", + "member": { + "shape": "ChainAddress" + }, + "max": 1, + "min": 1 + }, "AssetContract": { "type": "structure", "required": [ @@ -447,6 +500,10 @@ }, "documentation": "The container for time.
" }, + "Boolean": { + "type": "boolean", + "box": true + }, "ChainAddress": { "type": "string", "pattern": "[-A-Za-z0-9]{13,74}" @@ -651,7 +708,7 @@ "members": { "transactionHash": { "shape": "QueryTransactionHash", - "documentation": "The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + "documentation": "The hash of a transaction. It is generated when a transaction is created.
" }, "network": { "shape": "QueryNetwork", @@ -691,7 +748,7 @@ }, "maxResults": { "shape": "ListAssetContractsInputMaxResultsInteger", - "documentation": "The maximum number of contracts to list.
Default:100
Even if additional results can be retrieved, the request can return less results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
The maximum number of contracts to list.
Default: 100
Even if additional results can be retrieved, the request can return fewer results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
The blockchain network where the transaction occurred.
Valid Values: BITCOIN_MAINNET
| BITCOIN_TESTNET
This is the unique public address on the blockchain for which the transaction events are being requested.
" + }, + "timeFilter": { + "shape": "TimeFilter", + "documentation": "This container specifies the time frame for the transaction events returned in the response.
" + }, + "voutFilter": { + "shape": "VoutFilter", + "documentation": "This container specifies filtering attributes related to BITCOIN_VOUT event types
" + }, + "confirmationStatusFilter": { + "shape": "ConfirmationStatusFilter" + }, + "sort": { + "shape": "ListFilteredTransactionEventsSort", + "documentation": "The order by which the results will be sorted.
" + }, + "nextToken": { + "shape": "NextToken", + "documentation": "The pagination token that indicates the next set of results to retrieve.
" + }, + "maxResults": { + "shape": "ListFilteredTransactionEventsInputMaxResultsInteger", + "documentation": "The maximum number of transaction events to list.
Default: 100
Even if additional results can be retrieved, the request can return fewer results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
The transaction events returned by the request.
" + }, + "nextToken": { + "shape": "NextToken", + "documentation": "The pagination token that indicates the next set of results to retrieve.
" + } + } + }, + "ListFilteredTransactionEventsSort": { + "type": "structure", + "members": { + "sortBy": { + "shape": "ListFilteredTransactionEventsSortBy", + "documentation": "Container on how the results will be sorted by?
" + }, + "sortOrder": { + "shape": "SortOrder", + "documentation": "The container for the sort order for ListFilteredTransactionEvents
. The SortOrder
field only accepts the values ASCENDING
and DESCENDING
. Not providing SortOrder
will default to ASCENDING
.
Lists all the transaction events for an address on the blockchain.
This operation is only supported on the Bitcoin blockchain networks.
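A hedged sketch of calling the new operation with the v2 JavaScript client, paging with nextToken exactly as the input and output shapes above describe; the address is a placeholder:

```javascript
// Sketch: page through all filtered transaction events for one Bitcoin address.
var AWS = require('aws-sdk');
var ambQuery = new AWS.ManagedBlockchainQuery({ region: 'us-east-1' });

async function listAllEvents(address) {
  var events = [];
  var nextToken;
  do {
    var page = await ambQuery.listFilteredTransactionEvents({
      network: 'BITCOIN_MAINNET',
      addressIdentifierFilter: { transactionEventToAddress: [address] },
      voutFilter: { voutSpent: false }, // only events for unspent outputs
      maxResults: 100,
      nextToken: nextToken
    }).promise();
    events = events.concat(page.events);
    nextToken = page.nextToken;
  } while (nextToken);
  return events;
}
```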
The maximum number of token balances to return.
Default:100
Even if additional results can be retrieved, the request can return less results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
The maximum number of token balances to return.
Default: 100
Even if additional results can be retrieved, the request can return fewer results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + "documentation": "The hash of a transaction. It is generated when a transaction is created.
" + }, + "transactionId": { + "shape": "QueryTransactionId", + "documentation": "The identifier of a Bitcoin transaction. It is generated when a transaction is created.
transactionId
is only supported on the Bitcoin networks.
The maximum number of transaction events to list.
Default:100
Even if additional results can be retrieved, the request can return less results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
The maximum number of transaction events to list.
Default: 100
Even if additional results can be retrieved, the request can return fewer results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
The order by which the results will be sorted. If ASCENNDING
is selected, the results will be ordered by fromTime
.
The order by which the results will be sorted.
" }, "nextToken": { "shape": "NextToken", @@ -841,11 +983,11 @@ }, "maxResults": { "shape": "ListTransactionsInputMaxResultsInteger", - "documentation": "The maximum number of transactions to list.
Default:100
Even if additional results can be retrieved, the request can return less results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
The maximum number of transactions to list.
Default: 100
Even if additional results can be retrieved, the request can return fewer results than maxResults
or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken
value. The value of nextToken
is null
when there are no more results to return
This filter is used to include transactions in the response that haven't reached finality . Transactions that have reached finiality are always part of the response.
" + "documentation": "This filter is used to include transactions in the response that haven't reached finality . Transactions that have reached finality are always part of the response.
" } } }, @@ -924,7 +1066,7 @@ "documentation": "The contract or wallet address for the owner.
" } }, - "documentation": "The container for the identifier of the owner.
" + "documentation": "The container for the owner identifier.
" }, "QueryNetwork": { "type": "string", @@ -967,6 +1109,10 @@ "type": "string", "pattern": "(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" }, + "QueryTransactionId": { + "type": "string", + "pattern": "(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" + }, "SortOrder": { "type": "string", "enum": [ @@ -977,6 +1123,18 @@ "String": { "type": "string" }, + "TimeFilter": { + "type": "structure", + "members": { + "from": { + "shape": "BlockchainInstant" + }, + "to": { + "shape": "BlockchainInstant" + } + }, + "documentation": "This container is used to specify a time frame.
" + }, "Timestamp": { "type": "timestamp" }, @@ -1081,7 +1239,7 @@ }, "transactionHash": { "shape": "QueryTransactionHash", - "documentation": "The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + "documentation": "The hash of a transaction. It is generated when a transaction is created.
" }, "blockNumber": { "shape": "String", @@ -1141,7 +1299,7 @@ }, "transactionId": { "shape": "String", - "documentation": "The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + "documentation": "The identifier of a Bitcoin transaction. It is generated when a transaction is created.
" }, "confirmationStatus": { "shape": "ConfirmationStatus", @@ -1168,7 +1326,7 @@ }, "transactionHash": { "shape": "QueryTransactionHash", - "documentation": "The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + "documentation": "The hash of a transaction. It is generated when a transaction is created.
" }, "eventType": { "shape": "QueryTransactionEventType", @@ -1188,7 +1346,7 @@ }, "contractAddress": { "shape": "ChainAddress", - "documentation": "The blockchain address. for the contract
" + "documentation": "The blockchain address for the contract
" }, "tokenId": { "shape": "QueryTokenId", @@ -1196,11 +1354,34 @@ }, "transactionId": { "shape": "String", - "documentation": "The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + "documentation": "The identifier of a Bitcoin transaction. It is generated when a transaction is created.
" }, "voutIndex": { "shape": "Integer", - "documentation": "The position of the vout in the transaction output list.
" + "documentation": "The position of the transaction output in the transaction output list.
" + }, + "voutSpent": { + "shape": "Boolean", + "documentation": "Specifies if the transaction output is spent or unspent. This is only returned for BITCOIN_VOUT event types.
This is only returned for BITCOIN_VOUT
event types.
The transactionId that created the spent transaction output.
This is only returned for BITCOIN_VIN
event types.
The transactionHash that created the spent transaction output.
This is only returned for BITCOIN_VIN
event types.
The position of the spent transaction output in the output list of the creating transaction.
This is only returned for BITCOIN_VIN
event types.
This container specifies whether the transaction has reached Finality.
" } }, "documentation": "The container for the properties of a transaction event.
" @@ -1223,7 +1404,7 @@ "members": { "transactionHash": { "shape": "QueryTransactionHash", - "documentation": "The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + "documentation": "The hash of a transaction. It is generated when a transaction is created.
" }, "network": { "shape": "QueryNetwork", @@ -1247,6 +1428,19 @@ }, "max": 250, "min": 0 + }, + "VoutFilter": { + "type": "structure", + "required": [ + "voutSpent" + ], + "members": { + "voutSpent": { + "shape": "Boolean", + "documentation": "Specifies if the transaction output is spent or unspent.
" + } + }, + "documentation": "This container specifies filtering attributes related to BITCOIN_VOUT
event types
Amazon Managed Blockchain (AMB) Query provides you with convenient access to multi-blockchain network data, which makes it easier for you to extract contextual data related to blockchain activity. You can use AMB Query to read data from public blockchain networks, such as Bitcoin Mainnet and Ethereum Mainnet. You can also get information such as the current and historical balances of addresses, or you can get a list of blockchain transactions for a given time period. Additionally, you can get details of a given transaction, such as transaction events, which you can further analyze or use in business logic for your applications.
" diff --git a/apis/managedblockchain-query-2023-05-04.paginators.json b/apis/managedblockchain-query-2023-05-04.paginators.json index 7625c5cab4..3948bd4266 100644 --- a/apis/managedblockchain-query-2023-05-04.paginators.json +++ b/apis/managedblockchain-query-2023-05-04.paginators.json @@ -6,6 +6,12 @@ "limit_key": "maxResults", "result_key": "contracts" }, + "ListFilteredTransactionEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "events" + }, "ListTokenBalances": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/clients/cloudformation.d.ts b/clients/cloudformation.d.ts index 508c36efec..0daae08226 100644 --- a/clients/cloudformation.d.ts +++ b/clients/cloudformation.d.ts @@ -21,11 +21,11 @@ declare class CloudFormation extends Service { */ activateOrganizationsAccess(callback?: (err: AWSError, data: CloudFormation.Types.ActivateOrganizationsAccessOutput) => void): Request>1,l=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,d=a?0:s-1,y=a?1:-1,b=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(n=isNaN(t)?1:0,o=m):(o=Math.floor(Math.log(t)/Math.LN2),t*(u=Math.pow(2,-o))<1&&(o--,u*=2),t+=o+c>=1?l/u:l*Math.pow(2,1-c),t*u>=2&&(o++,u/=2),o+c>=m?(n=0,o=m):o+c>=1?(n=(t*u-1)*Math.pow(2,i),o+=c):(n=t*Math.pow(2,c-1)*Math.pow(2,i),o=0));i>=8;e[r+d]=255&n,d+=y,n/=256,i-=8);for(o=o<0;e[r+d]=255&o,d+=y,o/=256,p-=8);e[r+d-y]|=128*b}},{}],443:[function(e,t,r){var a={}.toString;t.exports=Array.isArray||function(e){return"[object Array]"==a.call(e)}},{}],444:[function(e,t,r){!function(e){"use strict";function t(e){return null!==e&&"[object Array]"===Object.prototype.toString.call(e)}function r(e){return null!==e&&"[object Object]"===Object.prototype.toString.call(e)}function a(e,i){if(e===i)return!0;if(Object.prototype.toString.call(e)!==Object.prototype.toString.call(i))return!1;if(!0===t(e)){if(e.length!==i.length)return!1;for(var s=0;s G((f-r)/g)&&i("overflow"),r+=(p-t)*g,t=p,u=0;u =0?(c=b.substr(0,S),l=b.substr(S+1)):(c=b,l=""),d=decodeURIComponent(c),y=decodeURIComponent(l),a(o,d)?i(o[d])?o[d].push(y):o[d]=[o[d],y]:o[d]=y}return o};var i=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},{}],448:[function(e,t,r){"use strict";function a(e,t){if(e.map)return e.map(t);for(var r=[],a=0;a >1,l=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,d=a?0:s-1,y=a?1:-1,b=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(n=isNaN(t)?1:0,o=m):(o=Math.floor(Math.log(t)/Math.LN2),t*(u=Math.pow(2,-o))<1&&(o--,u*=2),t+=o+c>=1?l/u:l*Math.pow(2,1-c),t*u>=2&&(o++,u/=2),o+c>=m?(n=0,o=m):o+c>=1?(n=(t*u-1)*Math.pow(2,i),o+=c):(n=t*Math.pow(2,c-1)*Math.pow(2,i),o=0));i>=8;e[r+d]=255&n,d+=y,n/=256,i-=8);for(o=o<0;e[r+d]=255&o,d+=y,o/=256,p-=8);e[r+d-y]|=128*b}},{}],443:[function(e,t,r){var a={}.toString;t.exports=Array.isArray||function(e){return"[object Array]"==a.call(e)}},{}],444:[function(e,t,r){!function(e){"use strict";function t(e){return null!==e&&"[object Array]"===Object.prototype.toString.call(e)}function r(e){return null!==e&&"[object Object]"===Object.prototype.toString.call(e)}function a(e,i){if(e===i)return!0;if(Object.prototype.toString.call(e)!==Object.prototype.toString.call(i))return!1;if(!0===t(e)){if(e.length!==i.length)return!1;for(var s=0;s G((f-r)/g)&&i("overflow"),r+=(p-t)*g,t=p,u=0;u =0?(c=b.substr(0,S),l=b.substr(S+1)):(c=b,l=""),d=decodeURIComponent(c),y=decodeURIComponent(l),a(o,d)?i(o[d])?o[d].push(y):o[d]=[o[d],y]:o[d]=y}return o};var 
i=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},{}],448:[function(e,t,r){"use strict";function a(e,t){if(e.map)return e.map(t);for(var r=[],a=0;a=55296&&t<=56319&&i65535&&(e-=65536,t+=w(e>>>10&1023|55296),e=56320|1023&e),t+=w(e)}).join("")}function p(e){return e-48<10?e-22:e-65<26?e-65:e-97<26?e-97:T}function m(e,t){return e+22+75*(e<26)-((0!=t)<<5)}function c(e,t,r){var a=0;for(e=r?G(e/R):e>>1,e+=G(e/t);e>L*k>>1;a+=T)e=G(e/L);return G(a+(L+1)*e/(e+A))}function l(e){var t,r,a,s,o,n,m,l,d,y,b=[],S=e.length,g=0,h=v,I=D;for(r=e.lastIndexOf(x),r<0&&(r=0),a=0;a=S&&i("invalid-input"),l=p(e.charCodeAt(s++)),(l>=T||l>G((f-g)/n))&&i("overflow"),g+=l*n,d=m<=I?C:m>=I+k?k:m-I,!(l=t&&bf&&i("overflow"),b==t){for(l=r,d=T;y=d<=o?C:d>=o+k?k:d-o,!(l=0&&delete e.httpRequest.headers["Content-Length"]}function i(e){var t=new l,r=e.service.api.operations[e.operation].input;if(r.payload){var a={},i=r.members[r.payload];a=e.params[r.payload],"structure"===i.type?(e.httpRequest.body=t.build(a||{},i),s(e)):void 0!==a&&(e.httpRequest.body=a,("binary"===i.type||i.isStreaming)&&s(e,!0))}else e.httpRequest.body=t.build(e.params,r),s(e)}function s(e,t){if(!e.httpRequest.headers["Content-Type"]){var r=t?"binary/octet-stream":"application/json";e.httpRequest.headers["Content-Type"]=r}}function o(e){m.buildRequest(e),y.indexOf(e.httpRequest.method)<0&&i(e)}function n(e){c.extractError(e)}function u(e){m.extractData(e);var t,r=e.request,a=r.service.api.operations[r.operation],i=r.service.api.operations[r.operation].output||{};a.hasEventOutput;if(i.payload){var s=i.members[i.payload],o=e.httpResponse.body;if(s.isEventStream)t=new d,e.data[payload]=p.createEventStream(2===AWS.HttpClient.streamsApiVersion?e.httpResponse.stream:o,t,s);else if("structure"===s.type||"list"===s.type){var t=new d;e.data[i.payload]=t.parse(o,s)}else"binary"===s.type||s.isStreaming?e.data[i.payload]=o:e.data[i.payload]=s.toType(o)}else{var n=e.data;c.extractData(e),e.data=p.merge(n,e.data)}}var p=e("../util"),m=e("./rest"),c=e("./json"),l=e("../json/builder"),d=e("../json/parser"),y=["GET","HEAD","DELETE"];t.exports={buildRequest:o,extractError:n,extractData:u,unsetContentLength:a}},{"../json/builder":374,"../json/parser":375,"../util":428,"./json":386,"./rest":388}],390:[function(e,t,r){function a(e){var t=e.service.api.operations[e.operation].input,r=new n.XML.Builder,a=e.params,i=t.payload;if(i){var s=t.members[i];if(void 0===(a=a[i]))return;if("structure"===s.type){var o=s.name;e.httpRequest.body=r.toXML(a,s,o,!0)}else e.httpRequest.body=a}else e.httpRequest.body=r.toXML(a,t,t.name||t.shape||u.string.upperFirst(e.operation)+"Request")}function i(e){p.buildRequest(e),["GET","HEAD"].indexOf(e.httpRequest.method)<0&&a(e)}function s(e){p.extractError(e);var t;try{t=(new n.XML.Parser).parse(e.httpResponse.body.toString())}catch(r){t={Code:e.httpResponse.statusCode,Message:e.httpResponse.statusMessage}}t.Errors&&(t=t.Errors),t.Error&&(t=t.Error),t.Code?e.error=u.error(new Error,{code:t.Code,message:t.Message}):e.error=u.error(new Error,{code:e.httpResponse.statusCode,message:null})}function o(e){p.extractData(e);var t,r=e.request,a=e.httpResponse.body,i=r.service.api.operations[r.operation],s=i.output,o=(i.hasEventOutput,s.payload);if(o){var m=s.members[o];m.isEventStream?(t=new n.XML.Parser,e.data[o]=u.createEventStream(2===n.HttpClient.streamsApiVersion?e.httpResponse.stream:e.httpResponse.body,t,m)):"structure"===m.type?(t=new 
n.XML.Parser,e.data[o]=t.parse(a.toString(),m)):"binary"===m.type||m.isStreaming?e.data[o]=a:e.data[o]=m.toType(a)}else if(a.length>0){t=new n.XML.Parser;var c=t.parse(a.toString(),s);u.update(e.data,c)}}var n=e("../core"),u=e("../util"),p=e("./rest");t.exports={buildRequest:i,extractError:s,extractData:o}},{"../core":350,"../util":428,"./rest":388}],391:[function(e,t,r){function a(){}function i(e){return e.isQueryName||"ec2"!==e.api.protocol?e.name:e.name[0].toUpperCase()+e.name.substr(1)}function s(e,t,r,a){p.each(r.members,function(r,s){var o=t[r];if(null!==o&&void 0!==o){var n=i(s);n=e?e+"."+n:n,u(n,o,s,a)}})}function o(e,t,r,a){var i=1;p.each(t,function(t,s){var o=r.flattened?".":".entry.",n=o+i+++".",p=n+(r.key.name||"key"),m=n+(r.value.name||"value");u(e+p,t,r.key,a),u(e+m,s,r.value,a)})}function n(e,t,r,a){var s=r.member||{};if(0===t.length)return void a.call(this,e,null);p.arrayEach(t,function(t,o){var n="."+(o+1);if("ec2"===r.api.protocol)n+="";else if(r.flattened){if(s.name){var p=e.split(".");p.pop(),p.push(i(s)),e=p.join(".")}}else n="."+(s.name?s.name:"member")+n;u(e+n,t,s,a)})}function u(e,t,r,a){null!==t&&void 0!==t&&("structure"===r.type?s(e,t,r,a):"list"===r.type?n(e,t,r,a):"map"===r.type?o(e,t,r,a):a(e,r.toWireFormat(t).toString()))}var p=e("../util");a.prototype.serialize=function(e,t,r){s("",e,t,r)},t.exports=a},{"../util":428}],392:[function(e,t,r){var a=e("../core"),i=null,s={signatureVersion:"v4",signingName:"rds-db",operations:{}},o={region:"string",hostname:"string",port:"number",username:"string"};a.RDS.Signer=a.util.inherit({constructor:function(e){this.options=e||{}},convertUrlToAuthToken:function(e){if(0===e.indexOf("https://"))return e.substring("https://".length)},getAuthToken:function(e,t){"function"==typeof e&&void 0===t&&(t=e,e={});var r=this,o="function"==typeof t;e=a.util.merge(this.options,e);var n=this.validateAuthTokenOptions(e);if(!0!==n){if(o)return t(n,null);throw n}var u={region:e.region,endpoint:new a.Endpoint(e.hostname+":"+e.port),paramValidation:!1,signatureVersion:"v4"};e.credentials&&(u.credentials=e.credentials),i=new a.Service(u),i.api=s;var p=i.makeRequest();if(this.modifyRequestForAuthToken(p,e),!o){var m=p.presign(900);return this.convertUrlToAuthToken(m)}p.presign(900,function(e,a){a&&(a=r.convertUrlToAuthToken(a)),t(e,a)})},modifyRequestForAuthToken:function(e,t){e.on("build",e.buildAsGet),e.httpRequest.body=a.util.queryParamsToString({Action:"connect",DBUser:t.username})},validateAuthTokenOptions:function(e){var t="";e=e||{};for(var r in o)Object.prototype.hasOwnProperty.call(o,r)&&typeof e[r]!==o[r]&&(t+="option '"+r+"' should have been type '"+o[r]+"', was '"+typeof e[r]+"'.\n");return!t.length||a.util.error(new Error,{code:"InvalidParameter",message:t})}})},{"../core":350}],393:[function(e,t,r){t.exports={now:function(){return"undefined"!=typeof performance&&"function"==typeof performance.now?performance.now():Date.now()}}},{}],394:[function(e,t,r){function a(e){return"string"==typeof e&&(e.startsWith("fips-")||e.endsWith("-fips"))}function i(e){return"string"==typeof e&&["aws-global","aws-us-gov-global"].includes(e)}function s(e){return["fips-aws-global","aws-fips","aws-global"].includes(e)?"us-east-1":["fips-aws-us-gov-global","aws-us-gov-global"].includes(e)?"us-gov-west-1":e.replace(/fips-(dkr-|prod-)?|-fips/,"")}t.exports={isFipsRegion:a,isGlobalRegion:i,getRealRegion:s}},{}],395:[function(e,t,r){function a(e){if(!e)return null;var t=e.split("-");return t.length<3?null:t.slice(0,t.length-2).join("-")+"-*"}function i(e){var 
t=e.config.region,r=a(t),i=e.api.endpointPrefix;return[[t,i],[r,i],[t,"*"],[r,"*"],["*",i],[t,"internal-*"],["*","*"]].map(function(e){return e[0]&&e[1]?e.join("/"):null})}function s(e,t){u.each(t,function(t,r){"globalEndpoint"!==t&&(void 0!==e.config[t]&&null!==e.config[t]||(e.config[t]=r))})}function o(e){for(var t=i(e),r=e.config.useFipsEndpoint,a=e.config.useDualstackEndpoint,o=0;o=a())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+a().toString(16)+" bytes");return 0|e}function b(e){return+e!=e&&(e=0),s.alloc(+e)}function S(e,t){if(s.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var r=e.length;if(0===r)return 0;for(var a=!1;;)switch(t){case"ascii":case"latin1":case"binary":return r;case"utf8":case"utf-8":case void 0:return K(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*r;case"hex":return r>>>1;case"base64":return H(e).length;default:if(a)return K(e).length;t=(""+t).toLowerCase(),a=!0}}function g(e,t,r){var a=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===r||r>this.length)&&(r=this.length),r<=0)return"";if(r>>>=0,t>>>=0,r<=t)return"";for(e||(e="utf8");;)switch(e){case"hex":return E(this,t,r);case"utf8":case"utf-8":return v(this,t,r);case"ascii":return P(this,t,r);case"latin1":case"binary":return q(this,t,r);case"base64":return D(this,t,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return M(this,t,r);default:if(a)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),a=!0}}function h(e,t,r){var a=e[t];e[t]=e[r],e[r]=a}function I(e,t,r,a,i){if(0===e.length)return-1;if("string"==typeof r?(a=r,r=0):r>2147483647?r=2147483647:r<-2147483648&&(r=-2147483648),r=+r,isNaN(r)&&(r=i?0:e.length-1),r<0&&(r=e.length+r),r>=e.length){if(i)return-1;r=e.length-1}else if(r<0){if(!i)return-1;r=0}if("string"==typeof t&&(t=s.from(t,a)),s.isBuffer(t))return 0===t.length?-1:N(e,t,r,a,i);if("number"==typeof t)return t&=255,s.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?i?Uint8Array.prototype.indexOf.call(e,t,r):Uint8Array.prototype.lastIndexOf.call(e,t,r):N(e,[t],r,a,i);throw new TypeError("val must be string, number or Buffer")}function N(e,t,r,a,i){function s(e,t){return 1===o?e[t]:e.readUInt16BE(t*o)}var o=1,n=e.length,u=t.length;if(void 0!==a&&("ucs2"===(a=String(a).toLowerCase())||"ucs-2"===a||"utf16le"===a||"utf-16le"===a)){if(e.length<2||t.length<2)return-1;o=2,n/=2,u/=2,r/=2}var p;if(i){var m=-1;for(p=r;p>>8*(a?i:1-i)}function B(e,t,r,a){t<0&&(t=4294967295+t+1);for(var i=0,s=Math.min(e.length-r,4);i>>8*(a?i:3-i)&255}function U(e,t,r,a,i,s){if(r+a>e.length)throw new RangeError("Index out of range");if(r<0)throw new RangeError("Index out of range")}function _(e,t,r,a,i){return i||U(e,t,r,4,3.4028234663852886e38,-3.4028234663852886e38),X.write(e,t,r,a,23,4),r+4}function F(e,t,r,a,i){return i||U(e,t,r,8,1.7976931348623157e308,-1.7976931348623157e308),X.write(e,t,r,a,52,8),r+8}function O(e){if(e=V(e).replace(ee,""),e.length<2)return"";for(;e.length%4!=0;)e+="=";return e}function V(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}function z(e){return e<16?"0"+e.toString(16):e.toString(16)}function K(e,t){t=t||1/0;for(var 
r,a=e.length,i=null,s=[],o=0;o55295&&r<57344){if(!i){if(r>56319){(t-=3)>-1&&s.push(239,191,189);continue}if(o+1===a){(t-=3)>-1&&s.push(239,191,189);continue}i=r;continue}if(r<56320){(t-=3)>-1&&s.push(239,191,189),i=r;continue}r=65536+(i-55296<<10|r-56320)}else i&&(t-=3)>-1&&s.push(239,191,189);if(i=null,r<128){if((t-=1)<0)break;s.push(r)}else if(r<2048){if((t-=2)<0)break;s.push(r>>6|192,63&r|128)}else if(r<65536){if((t-=3)<0)break;s.push(r>>12|224,r>>6&63|128,63&r|128)}else{if(!(r<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;s.push(r>>18|240,r>>12&63|128,r>>6&63|128,63&r|128)}}return s}function j(e){for(var t=[],r=0;r=55296&&t<=56319&&i65535&&(e-=65536,t+=w(e>>>10&1023|55296),e=56320|1023&e),t+=w(e)}).join("")}function p(e){return e-48<10?e-22:e-65<26?e-65:e-97<26?e-97:T}function m(e,t){return e+22+75*(e<26)-((0!=t)<<5)}function c(e,t,r){var a=0;for(e=r?G(e/R):e>>1,e+=G(e/t);e>L*k>>1;a+=T)e=G(e/L);return G(a+(L+1)*e/(e+A))}function l(e){var t,r,a,s,o,n,m,l,d,y,b=[],S=e.length,g=0,h=v,I=D;for(r=e.lastIndexOf(x),r<0&&(r=0),a=0;a=S&&i("invalid-input"),l=p(e.charCodeAt(s++)),(l>=T||l>G((f-g)/n))&&i("overflow"),g+=l*n,d=m<=I?C:m>=I+k?k:m-I,!(l=t&&bf&&i("overflow"),b==t){for(l=r,d=T;y=d<=o?C:d>=o+k?k:d-o,!(l