diff --git a/CHANGELOG.md b/CHANGELOG.md index da88c022527..0e434906a78 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,17 @@ +Release v1.32.11 (2020-06-26) === + +### Service Client Updates +* `service/cloudformation`: Updates service API and documentation + * ListStackInstances and DescribeStackInstance now return a new `StackInstanceStatus` object that contains `DetailedStatus` values: a disambiguation of the more generic `Status` value. ListStackInstances output can now be filtered on `DetailedStatus` using the new `Filters` parameter. +* `service/cognito-idp`: Updates service API +* `service/dms`: Updates service documentation + * This release contains miscellaneous API documentation updates for AWS DMS in response to several customer reported issues. +* `service/quicksight`: Updates service API and documentation + * Added support for cross-region DataSource credentials copying. +* `service/sagemaker`: Updates service API and documentation + * The new 'ModelClientConfig' parameter being added for CreateTransformJob and DescribeTransformJob API actions enables customers to configure model invocation related parameters such as timeout and retry. 
+ Release v1.32.10 (2020-06-25) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 7585fc6000d..efa7645ba25 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -2949,6 +2949,12 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -7116,6 +7122,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "backup": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ diff --git a/aws/version.go b/aws/version.go index f7f45e640e0..571a406e1d6 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.32.10" +const SDKVersion = "1.32.11" diff --git a/models/apis/cloudformation/2010-05-15/api-2.json b/models/apis/cloudformation/2010-05-15/api-2.json index 75d3e712188..36dcba50ad7 100644 --- a/models/apis/cloudformation/2010-05-15/api-2.json +++ b/models/apis/cloudformation/2010-05-15/api-2.json @@ -1841,6 +1841,7 @@ "StackSetName":{"shape":"StackSetName"}, "NextToken":{"shape":"NextToken"}, "MaxResults":{"shape":"MaxResults"}, + "Filters":{"shape":"StackInstanceFilters"}, "StackInstanceAccount":{"shape":"Account"}, "StackInstanceRegion":{"shape":"Region"} } @@ -2679,12 +2680,51 @@ "StackId":{"shape":"StackId"}, "ParameterOverrides":{"shape":"Parameters"}, "Status":{"shape":"StackInstanceStatus"}, + "StackInstanceStatus":{"shape":"StackInstanceComprehensiveStatus"}, "StatusReason":{"shape":"Reason"}, "OrganizationalUnitId":{"shape":"OrganizationalUnitId"}, "DriftStatus":{"shape":"StackDriftStatus"}, "LastDriftCheckTimestamp":{"shape":"Timestamp"} } }, + "StackInstanceComprehensiveStatus":{ + "type":"structure", + "members":{ + 
"DetailedStatus":{"shape":"StackInstanceDetailedStatus"} + } + }, + "StackInstanceDetailedStatus":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "SUCCEEDED", + "FAILED", + "CANCELLED", + "INOPERABLE" + ] + }, + "StackInstanceFilter":{ + "type":"structure", + "members":{ + "Name":{"shape":"StackInstanceFilterName"}, + "Values":{"shape":"StackInstanceFilterValues"} + } + }, + "StackInstanceFilterName":{ + "type":"string", + "enum":["DETAILED_STATUS"] + }, + "StackInstanceFilterValues":{ + "type":"string", + "max":10, + "min":6 + }, + "StackInstanceFilters":{ + "type":"list", + "member":{"shape":"StackInstanceFilter"}, + "max":1 + }, "StackInstanceNotFoundException":{ "type":"structure", "members":{ @@ -2717,6 +2757,7 @@ "StackId":{"shape":"StackId"}, "Status":{"shape":"StackInstanceStatus"}, "StatusReason":{"shape":"Reason"}, + "StackInstanceStatus":{"shape":"StackInstanceComprehensiveStatus"}, "OrganizationalUnitId":{"shape":"OrganizationalUnitId"}, "DriftStatus":{"shape":"StackDriftStatus"}, "LastDriftCheckTimestamp":{"shape":"Timestamp"} diff --git a/models/apis/cloudformation/2010-05-15/docs-2.json b/models/apis/cloudformation/2010-05-15/docs-2.json index 44a802e3452..43f151a1f59 100644 --- a/models/apis/cloudformation/2010-05-15/docs-2.json +++ b/models/apis/cloudformation/2010-05-15/docs-2.json @@ -37,7 +37,7 @@ "ListChangeSets": "
Returns the ID and status of each active change set for a stack. For example, AWS CloudFormation lists change sets that are in the CREATE_IN_PROGRESS
or CREATE_PENDING
state.
Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue
function.
For more information, see AWS CloudFormation Export Stack Output Values.
", "ListImports": "Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.
For more information about importing an exported output value, see the Fn::ImportValue
function.
Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific AWS account name or Region.
", + "ListStackInstances": "Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific AWS account name or Region, or that have a specific status.
", "ListStackResources": "Returns descriptions of all resources of the specified stack.
For deleted stacks, ListStackResources returns resource information for up to 90 days after the stack has been deleted.
", "ListStackSetOperationResults": "Returns summary information about the results of a stack set operation.
", "ListStackSetOperations": "Returns summary information about operations performed on a stack set.
", @@ -1608,7 +1608,7 @@ "refs": { "ResourceChange$ResourceType": "The type of AWS CloudFormation resource, such as AWS::S3::Bucket
.
The template resource type of the target resources, such as AWS::S3::Bucket
.
The type of resource to import into your stack, such as AWS::S3::Bucket
.
The type of resource to import into your stack, such as AWS::S3::Bucket
. For a list of supported resource types, see Resources that support import operations in the AWS CloudFormation User Guide.
Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)
", "StackResource$ResourceType": "Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)
", @@ -1831,6 +1831,43 @@ "DescribeStackInstanceOutput$StackInstance": "The stack instance that matches the specified request parameters.
" } }, + "StackInstanceComprehensiveStatus": { + "base": "The detailed status of the stack instance.
", + "refs": { + "StackInstance$StackInstanceStatus": "The detailed status of the stack instance.
", + "StackInstanceSummary$StackInstanceStatus": "The detailed status of the stack instance.
" + } + }, + "StackInstanceDetailedStatus": { + "base": null, + "refs": { + "StackInstanceComprehensiveStatus$DetailedStatus": " CANCELLED
: The operation in the specified account and Region has been cancelled. This is either because a user has stopped the stack set operation, or because the failure tolerance of the stack set operation has been exceeded.
FAILED
: The operation in the specified account and Region failed. If the stack set operation fails in enough accounts within a Region, the failure tolerance for the stack set operation as a whole might be exceeded.
INOPERABLE
: A DeleteStackInstances
operation has failed and left the stack in an unstable state. Stacks in this state are excluded from further UpdateStackSet
operations. You might need to perform a DeleteStackInstances
operation, with RetainStacks
set to true
, to delete the stack instance, and then delete the stack manually.
PENDING
: The operation in the specified account and Region has yet to start.
RUNNING
: The operation in the specified account and Region is currently in progress.
SUCCEEDED
: The operation in the specified account and Region completed successfully.
The status that stack instances are filtered by.
", + "refs": { + "StackInstanceFilters$member": null + } + }, + "StackInstanceFilterName": { + "base": null, + "refs": { + "StackInstanceFilter$Name": "The type of filter to apply.
" + } + }, + "StackInstanceFilterValues": { + "base": null, + "refs": { + "StackInstanceFilter$Values": "The status to filter by.
" + } + }, + "StackInstanceFilters": { + "base": null, + "refs": { + "ListStackInstancesInput$Filters": "The status that stack instances are filtered by.
" + } + }, "StackInstanceNotFoundException": { "base": "The specified stack instance doesn't exist.
", "refs": { diff --git a/models/apis/cognito-idp/2016-04-18/api-2.json b/models/apis/cognito-idp/2016-04-18/api-2.json index 7b706d5ff57..38e7c60cac4 100644 --- a/models/apis/cognito-idp/2016-04-18/api-2.json +++ b/models/apis/cognito-idp/2016-04-18/api-2.json @@ -1260,7 +1260,8 @@ {"shape":"InternalErrorException"}, {"shape":"InvalidSmsRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} - ] + ], + "authtype":"none" }, "ListDevices":{ "name":"ListDevices", @@ -1480,7 +1481,8 @@ {"shape":"AliasExistsException"}, {"shape":"InternalErrorException"}, {"shape":"SoftwareTokenMFANotFoundException"} - ] + ], + "authtype":"none" }, "SetRiskConfiguration":{ "name":"SetRiskConfiguration", diff --git a/models/apis/dms/2016-01-01/docs-2.json b/models/apis/dms/2016-01-01/docs-2.json index c55fa1e71d5..2f7109754f8 100644 --- a/models/apis/dms/2016-01-01/docs-2.json +++ b/models/apis/dms/2016-01-01/docs-2.json @@ -91,17 +91,17 @@ "AuthMechanismValue": { "base": null, "refs": { - "MongoDbSettings$AuthMechanism": "The authentication mechanism you use to access the MongoDB source endpoint.
Valid values: DEFAULT, MONGODB_CR, SCRAM_SHA_1
DEFAULT – For MongoDB version 2.x, use MONGODB_CR. For MongoDB version 3.x, use SCRAM_SHA_1. This setting isn't used when authType=No.
" + "MongoDbSettings$AuthMechanism": "The authentication mechanism you use to access the MongoDB source endpoint.
For the default value, in MongoDB version 2.x, \"default\"
is \"mongodb_cr\"
. For MongoDB version 3.x or later, \"default\"
is \"scram_sha_1\"
. This setting isn't used when AuthType
is set to \"no\"
.
The authentication type you use to access the MongoDB source endpoint.
Valid values: NO, PASSWORD
When NO is selected, user name and password parameters are not used and can be empty.
" + "MongoDbSettings$AuthType": "The authentication type you use to access the MongoDB source endpoint.
When set to \"no\"
, user name and password parameters are not used and can be empty.
The name of the Availability Zone for use during database migration.
", + "base": "The name of an Availability Zone for use during database migration.
", "refs": { "Subnet$SubnetAvailabilityZone": "The Availability Zone of the subnet.
" } @@ -140,7 +140,7 @@ "ModifyEventSubscriptionMessage$Enabled": "A Boolean value; set to true to activate the subscription.
", "ModifyReplicationInstanceMessage$MultiAZ": " Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone
parameter if the Multi-AZ parameter is set to true
.
A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case dsecribed following. The change is asynchronously applied as soon as possible.
An outage does result if these factors apply:
This parameter is set to true
during the maintenance window.
A newer minor version is available.
AWS DMS has enabled automatic patching for the given engine version.
If you want IAM authorization enabled for this endpoint, set this parameter to true
and attach the appropriate role policy document to your service role specified by ServiceAccessRoleArn
. The default is false
.
If you want AWS Identity and Access Management (IAM) authorization enabled for this endpoint, set this parameter to true
. Then attach the appropriate IAM policy document to your service role specified by ServiceAccessRoleArn
. The default is false
.
If this parameter is true
, the reboot is conducted through a Multi-AZ failover. (If the instance isn't configured for Multi-AZ, then you can't specify true
.)
A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be loaded without generating an error. You can choose true
or false
(the default).
This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a NULL value into that field.
", "RedshiftSettings$EmptyAsNull": "A value that specifies whether AWS DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of true
sets empty CHAR and VARCHAR fields to null. The default is false
.
Provides information that defines an Elasticsearch endpoint.
", "refs": { - "CreateEndpointMessage$ElasticsearchSettings": "Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration User Guide.
", + "CreateEndpointMessage$ElasticsearchSettings": "Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.
", "Endpoint$ElasticsearchSettings": "The settings for the Elasticsearch source endpoint. For more information, see the ElasticsearchSettings
structure.
Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration User Guide.
" + "ModifyEndpointMessage$ElasticsearchSettings": "Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.
" } }, "EncodingTypeValue": { @@ -726,8 +726,8 @@ "ModifyReplicationInstanceMessage$AllocatedStorage": "The amount of storage (in gigabytes) to be allocated for the replication instance.
", "MongoDbSettings$Port": "The port value for the MongoDB source endpoint.
", "NeptuneSettings$ErrorRetryDuration": "The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 250.
", - "NeptuneSettings$MaxFileSize": "The maximum size in KB of migrated graph data stored in a CSV file before AWS DMS bulk-loads the data to the Neptune target database. The default is 1048576 KB. If successful, AWS DMS clears the bucket, ready to store the next batch of migrated graph data.
", - "NeptuneSettings$MaxRetryCount": "The number of times for AWS DMS to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 5.
", + "NeptuneSettings$MaxFileSize": "The maximum size in kilobytes of migrated graph data stored in a .csv file before AWS DMS bulk-loads the data to the Neptune target database. The default is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, ready to store the next batch of migrated graph data.
", + "NeptuneSettings$MaxRetryCount": "The number of times for AWS DMS to retry a bulk load of migrated graph data to the Neptune target database before raising an error. The default is 5.
", "RedshiftSettings$ConnectionTimeout": "A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you initially establish a connection.
", "RedshiftSettings$FileTransferUploadStreams": "The number of threads used to upload a single file. This parameter accepts a value from 1 through 64. It defaults to 10.
", "RedshiftSettings$LoadTimeout": "The amount of time to wait (in milliseconds) before timing out, beginning from when you begin loading.
", @@ -788,9 +788,9 @@ "KafkaSettings": { "base": "Provides information that describes an Apache Kafka endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.
", "refs": { - "CreateEndpointMessage$KafkaSettings": "Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration User Guide.
", + "CreateEndpointMessage$KafkaSettings": "Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
", "Endpoint$KafkaSettings": "The settings for the Apache Kafka target endpoint. For more information, see the KafkaSettings
structure.
Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration User Guide.
" + "ModifyEndpointMessage$KafkaSettings": "Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
" } }, "KeyList": { @@ -802,9 +802,9 @@ "KinesisSettings": { "base": "Provides information that describes an Amazon Kinesis Data Stream endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.
", "refs": { - "CreateEndpointMessage$KinesisSettings": "Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration User Guide.
", + "CreateEndpointMessage$KinesisSettings": "Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
", "Endpoint$KinesisSettings": "The settings for the Amazon Kinesis target endpoint. For more information, see the KinesisSettings
structure.
Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration User Guide.
" + "ModifyEndpointMessage$KinesisSettings": "Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
" } }, "ListTagsForResourceMessage": { @@ -911,15 +911,15 @@ "NeptuneSettings": { "base": "Provides information that defines an Amazon Neptune endpoint.
", "refs": { - "CreateEndpointMessage$NeptuneSettings": "Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings in the AWS Database Migration Service User Guide.
", - "Endpoint$NeptuneSettings": "The settings for the MongoDB source endpoint. For more information, see the NeptuneSettings
structure.
Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings in the AWS Database Migration Service User Guide.
" + "CreateEndpointMessage$NeptuneSettings": "Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.
", + "Endpoint$NeptuneSettings": "The settings for the Amazon Neptune target endpoint. For more information, see the NeptuneSettings
structure.
Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.
" } }, "NestingLevelValue": { "base": null, "refs": { - "MongoDbSettings$NestingLevel": "Specifies either document or table mode.
Valid values: NONE, ONE
Default value is NONE. Specify NONE to use document mode. Specify ONE to use table mode.
" + "MongoDbSettings$NestingLevel": "Specifies either document or table mode.
Default value is \"none\"
. Specify \"none\"
to use document mode. Specify \"one\"
to use table mode.
The signing algorithm for the certificate.
", "Connection$ReplicationInstanceArn": "The ARN of the replication instance.
", "Connection$EndpointArn": "The ARN string that uniquely identifies the endpoint.
", - "Connection$Status": "The connection status.
", + "Connection$Status": "The connection status. This parameter can return one of the following values:
\"successful\"
\"testing\"
\"failed\"
\"deleting\"
The error message when the connection last failed.
", "Connection$EndpointIdentifier": "The identifier of the endpoint. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
", "Connection$ReplicationInstanceIdentifier": "The replication instance identifier. This parameter is stored as a lowercase string.
", "CreateEndpointMessage$EndpointIdentifier": "The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
", - "CreateEndpointMessage$EngineName": "The type of engine for the endpoint. Valid values, depending on the EndpointType
value, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, and \"sqlserver\"
.
The type of engine for the endpoint. Valid values, depending on the EndpointType
value, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, \"sqlserver\"
, and \"neptune\"
.
The user name to be used to log in to the endpoint database.
", "CreateEndpointMessage$ServerName": "The name of the server where the endpoint database resides.
", "CreateEndpointMessage$DatabaseName": "The name of the endpoint database.
", @@ -1292,25 +1292,25 @@ "CreateEventSubscriptionMessage$SubscriptionName": "The name of the AWS DMS event notification subscription. This name must be less than 255 characters.
", "CreateEventSubscriptionMessage$SnsTopicArn": "The Amazon Resource Name (ARN) of the Amazon SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.
", "CreateEventSubscriptionMessage$SourceType": " The type of AWS DMS resource that generates the events. For example, if you want to be notified of events generated by a replication instance, you set this parameter to replication-instance
. If this value isn't specified, all events are returned.
Valid values: replication-instance
| replication-task
The replication instance identifier. This parameter is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 alphanumeric characters or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Example: myrepinstance
The compute and memory capacity of the replication instance as specified by the replication instance class.
Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge
The replication instance identifier. This parameter is stored as a lowercase string.
Constraints:
Must contain 1-63 alphanumeric characters or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Example: myrepinstance
The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", "CreateReplicationInstanceMessage$AvailabilityZone": "The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's AWS Region, for example: us-east-1d
A subnet group to associate with the replication instance.
", "CreateReplicationInstanceMessage$PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
Format: ddd:hh24:mi-ddd:hh24:mi
Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
", "CreateReplicationInstanceMessage$EngineVersion": "The engine version number of the replication instance.
", "CreateReplicationInstanceMessage$KmsKeyId": "An AWS KMS key identifier that is used to encrypt the data on the replication instance.
If you don't specify a value for the KmsKeyId
parameter, then AWS DMS uses your default encryption key.
AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
", - "CreateReplicationInstanceMessage$DnsNameServers": "A list of DNS name servers supported for the replication instance.
", + "CreateReplicationInstanceMessage$DnsNameServers": "A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. For example: \"1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4\"
The name for the replication subnet group. This value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be \"default\".
Example: mySubnetgroup
The description for the subnet group.
", - "CreateReplicationTaskMessage$ReplicationTaskIdentifier": "An identifier for the replication task.
Constraints:
Must contain from 1 to 255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
An identifier for the replication task.
Constraints:
Must contain 1-255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
An Amazon Resource Name (ARN) that uniquely identifies the source endpoint.
", "CreateReplicationTaskMessage$TargetEndpointArn": "An Amazon Resource Name (ARN) that uniquely identifies the target endpoint.
", "CreateReplicationTaskMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of a replication instance.
", - "CreateReplicationTaskMessage$TableMappings": "The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the AWS Database Migration User Guide.
", + "CreateReplicationTaskMessage$TableMappings": "The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the AWS Database Migration Service User Guide.
", "CreateReplicationTaskMessage$ReplicationTaskSettings": "Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for AWS Database Migration Service Tasks in the AWS Database Migration User Guide.
", "CreateReplicationTaskMessage$CdcStartPosition": "Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName
extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.
Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.
Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12”
Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 “
", - "CreateReplicationTaskMessage$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration User Guide.
", + "CreateReplicationTaskMessage$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.
", "DeleteCertificateMessage$CertificateArn": "The Amazon Resource Name (ARN) of the deleted certificate.
", "DeleteConnectionMessage$EndpointArn": "The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "DeleteConnectionMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the replication instance.
", @@ -1368,7 +1368,7 @@ "ElasticsearchSettings$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) used by service to access the IAM role.
", "ElasticsearchSettings$EndpointUri": "The endpoint for the Elasticsearch cluster.
", "Endpoint$EndpointIdentifier": "The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
", - "Endpoint$EngineName": "The database engine name. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, and \"sqlserver\"
.
The database engine name. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, \"sqlserver\"
, and \"neptune\"
.
The expanded name for the engine name. For example, if the EngineName
parameter is \"aurora,\" this value would be \"Amazon Aurora MySQL.\"
The user name used to connect to the endpoint.
", "Endpoint$ServerName": "The name of the server at the endpoint.
", @@ -1389,7 +1389,7 @@ "EventSubscription$CustSubscriptionId": "The AWS DMS event notification subscription Id.
", "EventSubscription$SnsTopicArn": "The topic ARN of the AWS DMS event notification subscription.
", "EventSubscription$Status": "The status of the AWS DMS event notification subscription.
Constraints:
Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist
The status \"no-permission\" indicates that AWS DMS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.
", - "EventSubscription$SubscriptionCreationTime": "The time the RDS event notification subscription was created.
", + "EventSubscription$SubscriptionCreationTime": "The time the AWS DMS event notification subscription was created.
", "EventSubscription$SourceType": "The type of AWS DMS resource that generates events.
Valid values: replication-instance | replication-server | security-group | replication-task
", "Filter$Name": "The name of the filter.
", "FilterValueList$member": null, @@ -1403,7 +1403,7 @@ "ListTagsForResourceMessage$ResourceArn": "The Amazon Resource Name (ARN) string that uniquely identifies the AWS DMS resource.
", "ModifyEndpointMessage$EndpointArn": "The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "ModifyEndpointMessage$EndpointIdentifier": "The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
", - "ModifyEndpointMessage$EngineName": "The type of engine for the endpoint. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, and \"sqlserver\"
.
The type of engine for the endpoint. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, \"sqlserver\"
, and \"neptune\"
.
The user name to be used to login to the endpoint database.
", "ModifyEndpointMessage$ServerName": "The name of the server where the endpoint database resides.
", "ModifyEndpointMessage$DatabaseName": "The name of the endpoint database.
", @@ -1415,31 +1415,31 @@ "ModifyEventSubscriptionMessage$SnsTopicArn": "The Amazon Resource Name (ARN) of the Amazon SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.
", "ModifyEventSubscriptionMessage$SourceType": "The type of AWS DMS resource that generates the events you want to subscribe to.
Valid values: replication-instance | replication-task
", "ModifyReplicationInstanceMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the replication instance.
", - "ModifyReplicationInstanceMessage$ReplicationInstanceClass": "The compute and memory capacity of the replication instance.
Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge
The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", "ModifyReplicationInstanceMessage$PreferredMaintenanceWindow": "The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
", "ModifyReplicationInstanceMessage$EngineVersion": "The engine version number of the replication instance.
", "ModifyReplicationInstanceMessage$ReplicationInstanceIdentifier": "The replication instance identifier. This parameter is stored as a lowercase string.
", "ModifyReplicationSubnetGroupMessage$ReplicationSubnetGroupIdentifier": "The name of the replication instance subnet group.
", "ModifyReplicationSubnetGroupMessage$ReplicationSubnetGroupDescription": "A description for the replication instance subnet group.
", "ModifyReplicationTaskMessage$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", - "ModifyReplicationTaskMessage$ReplicationTaskIdentifier": "The replication task identifier.
Constraints:
Must contain from 1 to 255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
The replication task identifier.
Constraints:
Must contain 1-255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://
. When working with the DMS API, provide the JSON as the parameter value, for example: --table-mappings file://mappingfile.json
JSON file that contains settings for the task, such as task metadata settings.
", "ModifyReplicationTaskMessage$CdcStartPosition": "Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName
extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.
Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.
Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12”
Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 “
", - "ModifyReplicationTaskMessage$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration User Guide.
", + "ModifyReplicationTaskMessage$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.
", "MongoDbSettings$Username": "The user name you use to access the MongoDB source endpoint.
", "MongoDbSettings$ServerName": "The name of the server on the MongoDB source endpoint.
", "MongoDbSettings$DatabaseName": "The database name on the MongoDB source endpoint.
", - "MongoDbSettings$ExtractDocId": " Specifies the document ID. Use this setting when NestingLevel
is set to NONE.
Default value is false.
", - "MongoDbSettings$DocsToInvestigate": " Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel
is set to ONE.
Must be a positive value greater than 0. Default value is 1000.
", - "MongoDbSettings$AuthSource": " The MongoDB database name. This setting isn't used when authType=NO
.
The default is admin.
", + "MongoDbSettings$ExtractDocId": " Specifies the document ID. Use this setting when NestingLevel
is set to \"none\"
.
Default value is \"false\"
.
Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel
is set to \"one\"
.
Must be a positive value greater than 0
. Default value is 1000
.
The MongoDB database name. This setting isn't used when AuthType
is set to \"no\"
.
The default is \"admin\"
.
The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId
parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
The ARN of the service role you have created for the Neptune target endpoint. For more information, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.ServiceRole in the AWS Database Migration Service User Guide.
", - "NeptuneSettings$S3BucketName": "The name of the S3 bucket for AWS DMS to temporarily store migrated graph data in CSV files before bulk-loading it to the Neptune target database. AWS DMS maps the SQL source data to graph data before storing it in these CSV files.
", - "NeptuneSettings$S3BucketFolder": "A folder path where you where you want AWS DMS to store migrated graph data in the S3 bucket specified by S3BucketName
The Amazon Resource Name (ARN) of the service role that you created for the Neptune target endpoint. For more information, see Creating an IAM Service Role for Accessing Amazon Neptune as a Target in the AWS Database Migration Service User Guide.
", + "NeptuneSettings$S3BucketName": "The name of the Amazon S3 bucket where AWS DMS can temporarily store migrated graph data in .csv files before bulk-loading it to the Neptune target database. AWS DMS maps the SQL source data to graph data before storing it in these .csv files.
", + "NeptuneSettings$S3BucketFolder": "A folder path where you want AWS DMS to store migrated graph data in the S3 bucket specified by S3BucketName
The version of the replication engine.
", - "OrderableReplicationInstance$ReplicationInstanceClass": "The compute and memory capacity of the replication instance.
Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge
The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", "OrderableReplicationInstance$StorageType": "The type of storage used by the replication instance.
", "PendingMaintenanceAction$Action": "The type of pending maintenance action that is available for the resource.
", "PendingMaintenanceAction$OptInStatus": "The type of opt-in request that has been received for the resource.
", @@ -1465,9 +1465,9 @@ "ReloadTablesMessage$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", "ReloadTablesResponse$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", "RemoveTagsFromResourceMessage$ResourceArn": "An AWS DMS resource from which you want to remove tag(s). The value for this parameter is an Amazon Resource Name (ARN).
", - "ReplicationInstance$ReplicationInstanceIdentifier": "The replication instance identifier. This parameter is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
Example: myrepinstance
The compute and memory capacity of the replication instance.
Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge
The status of the replication instance.
", + "ReplicationInstance$ReplicationInstanceIdentifier": "The replication instance identifier. This parameter is stored as a lowercase string.
Constraints:
Must contain 1-63 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
Example: myrepinstance
The compute and memory capacity of the replication instance as defined for the specified replication instance class.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", + "ReplicationInstance$ReplicationInstanceStatus": "The status of the replication instance. The possible return values include:
\"available\"
\"creating\"
\"deleted\"
\"deleting\"
\"failed\"
\"modifying\"
\"upgrading\"
\"rebooting\"
\"resetting-master-credentials\"
\"storage-full\"
\"incompatible-credentials\"
\"incompatible-network\"
\"maintenance\"
The Availability Zone for the instance.
", "ReplicationInstance$PreferredMaintenanceWindow": "The maintenance window times for the replication instance.
", "ReplicationInstance$EngineVersion": "The engine version number of the replication instance.
", @@ -1476,18 +1476,18 @@ "ReplicationInstance$ReplicationInstancePublicIpAddress": "The public IP address of the replication instance.
", "ReplicationInstance$ReplicationInstancePrivateIpAddress": "The private IP address of the replication instance.
", "ReplicationInstance$SecondaryAvailabilityZone": "The Availability Zone of the standby replication instance in a Multi-AZ deployment.
", - "ReplicationInstance$DnsNameServers": "The DNS name servers for the replication instance.
", + "ReplicationInstance$DnsNameServers": "The DNS name servers supported for the replication instance to access your on-premise source or target database.
", "ReplicationInstancePrivateIpAddressList$member": null, "ReplicationInstancePublicIpAddressList$member": null, "ReplicationInstanceTaskLog$ReplicationTaskName": "The name of the replication task.
", "ReplicationInstanceTaskLog$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", - "ReplicationPendingModifiedValues$ReplicationInstanceClass": "The compute and memory capacity of the replication instance.
Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge
The compute and memory capacity of the replication instance as defined for the specified replication instance class.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", "ReplicationPendingModifiedValues$EngineVersion": "The engine version number of the replication instance.
", "ReplicationSubnetGroup$ReplicationSubnetGroupIdentifier": "The identifier of the replication instance subnet group.
", "ReplicationSubnetGroup$ReplicationSubnetGroupDescription": "A description for the replication subnet group.
", "ReplicationSubnetGroup$VpcId": "The ID of the VPC.
", "ReplicationSubnetGroup$SubnetGroupStatus": "The status of the subnet group.
", - "ReplicationTask$ReplicationTaskIdentifier": "The user-assigned replication task identifier or name.
Constraints:
Must contain from 1 to 255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
The user-assigned replication task identifier or name.
Constraints:
Must contain 1-255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "ReplicationTask$TargetEndpointArn": "The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "ReplicationTask$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the replication instance.
", @@ -1495,12 +1495,12 @@ "ReplicationTask$ReplicationTaskSettings": "The settings for the replication task.
", "ReplicationTask$Status": "The status of the replication task.
", "ReplicationTask$LastFailureMessage": "The last error (failure) message generated for the replication instance.
", - "ReplicationTask$StopReason": "The reason the replication task was stopped.
", + "ReplicationTask$StopReason": "The reason the replication task was stopped. This response parameter can return one of the following values:
\"STOP_REASON_FULL_LOAD_COMPLETED\"
– Full-load migration completed.
\"STOP_REASON_CACHED_CHANGES_APPLIED\"
– Change data capture (CDC) load completed.
\"STOP_REASON_CACHED_CHANGES_NOT_APPLIED\"
– In a full-load and CDC migration, the full-load stopped as specified before starting the CDC migration.
\"STOP_REASON_SERVER_TIME\"
– The migration stopped at the specified server time.
Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition
or CdcStartTime
to specify when you want the CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
", "ReplicationTask$CdcStopPosition": "Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.
Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12”
Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 “
", "ReplicationTask$RecoveryCheckpoint": "Indicates the last checkpoint that occurred during a change data capture (CDC) operation. You can provide this value to the CdcStartPosition
parameter to start a CDC operation that begins at that checkpoint.
The Amazon Resource Name (ARN) of the replication task.
", - "ReplicationTask$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration User Guide.
", + "ReplicationTask$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.
", "ReplicationTaskAssessmentResult$ReplicationTaskIdentifier": "The replication task identifier of the task on which the task assessment was run.
", "ReplicationTaskAssessmentResult$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", "ReplicationTaskAssessmentResult$AssessmentStatus": "The status of the task assessment.
", @@ -1526,7 +1526,7 @@ "Subnet$SubnetIdentifier": "The subnet identifier.
", "Subnet$SubnetStatus": "The status of the subnet.
", "SubnetIdentifierList$member": null, - "SupportedEndpointType$EngineName": "The database engine name. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, and \"sqlserver\"
.
The database engine name. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, \"sqlserver\"
, and \"neptune\"
.
The earliest AWS DMS engine version that supports this endpoint engine. Note that endpoint engines released with AWS DMS versions earlier than 3.1.1 do not return a value for this parameter.
", "SupportedEndpointType$EngineDisplayName": "The expanded name for the engine name. For example, if the EngineName
parameter is \"aurora,\" this value would be \"Amazon Aurora MySQL.\"
The schema name.
", @@ -1536,12 +1536,12 @@ "TableStatistics$ValidationStateDetails": "Additional details about the state of validation.
", "TableToReload$SchemaName": "The schema name of the table to be reloaded.
", "TableToReload$TableName": "The table name of the table to be reloaded.
", - "Tag$Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", - "Tag$Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", + "Tag$Key": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regular expressions: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", + "Tag$Value": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regular expressions: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", "TestConnectionMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the replication instance.
", "TestConnectionMessage$EndpointArn": "The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "VpcSecurityGroupIdList$member": null, - "VpcSecurityGroupMembership$VpcSecurityGroupId": "The VPC security group Id.
", + "VpcSecurityGroupMembership$VpcSecurityGroupId": "The VPC security group ID.
", "VpcSecurityGroupMembership$Status": "The status of the VPC security group.
" } }, @@ -1678,7 +1678,7 @@ } }, "VpcSecurityGroupMembership": { - "base": "Describes status of a security group associated with the virtual private cloud hosting your replication and DB instances.
", + "base": "Describes the status of a security group associated with the virtual private cloud (VPC) hosting your replication and DB instances.
", "refs": { "VpcSecurityGroupMembershipList$member": null } diff --git a/models/apis/quicksight/2018-04-01/api-2.json b/models/apis/quicksight/2018-04-01/api-2.json index 33fc4c29758..2e085ccb261 100644 --- a/models/apis/quicksight/2018-04-01/api-2.json +++ b/models/apis/quicksight/2018-04-01/api-2.json @@ -1444,6 +1444,10 @@ "error":{"httpStatusCode":409}, "exception":true }, + "CopySourceArn":{ + "type":"string", + "pattern":"^arn:[-a-z0-9]*:quicksight:[-a-z0-9]*:[0-9]{12}:datasource/.+" + }, "CreateColumnsOperation":{ "type":"structure", "required":["Columns"], @@ -1809,7 +1813,8 @@ ], "members":{ "Username":{"shape":"Username"}, - "Password":{"shape":"Password"} + "Password":{"shape":"Password"}, + "AlternateDataSourceParameters":{"shape":"DataSourceParametersList"} } }, "CustomSql":{ @@ -2075,6 +2080,7 @@ "CreatedTime":{"shape":"Timestamp"}, "LastUpdatedTime":{"shape":"Timestamp"}, "DataSourceParameters":{"shape":"DataSourceParameters"}, + "AlternateDataSourceParameters":{"shape":"DataSourceParametersList"}, "VpcConnectionProperties":{"shape":"VpcConnectionProperties"}, "SslProperties":{"shape":"SslProperties"}, "ErrorInfo":{"shape":"DataSourceErrorInfo"} @@ -2083,7 +2089,8 @@ "DataSourceCredentials":{ "type":"structure", "members":{ - "CredentialPair":{"shape":"CredentialPair"} + "CredentialPair":{"shape":"CredentialPair"}, + "CopySourceArn":{"shape":"CopySourceArn"} }, "sensitive":true }, @@ -2097,6 +2104,8 @@ "DataSourceErrorInfoType":{ "type":"string", "enum":[ + "ACCESS_DENIED", + "COPY_SOURCE_NOT_FOUND", "TIMEOUT", "ENGINE_VERSION_NOT_SUPPORTED", "UNKNOWN_HOST", @@ -2133,6 +2142,12 @@ "TwitterParameters":{"shape":"TwitterParameters"} } }, + "DataSourceParametersList":{ + "type":"list", + "member":{"shape":"DataSourceParameters"}, + "max":50, + "min":1 + }, "DataSourceType":{ "type":"string", "enum":[ diff --git a/models/apis/quicksight/2018-04-01/docs-2.json b/models/apis/quicksight/2018-04-01/docs-2.json index 0e2c125a9a4..b49520cd1a9 100644 --- 
a/models/apis/quicksight/2018-04-01/docs-2.json +++ b/models/apis/quicksight/2018-04-01/docs-2.json @@ -3,7 +3,7 @@ "service": "Amazon QuickSight is a fully managed, serverless business intelligence service for the AWS Cloud that makes it easy to extend data and insights to every user in your organization. This API reference contains documentation for a programming interface that you can use to manage Amazon QuickSight.
", "operations": { "CancelIngestion": "Cancels an ongoing ingestion of data into SPICE.
", - "CreateDashboard": "Creates a dashboard from a template. To first create a template, see the CreateTemplate API operation.
A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. You can share QuickSight dashboards. With the right permissions, you can create scheduled email reports from them. The CreateDashboard
, DescribeDashboard
, and ListDashboardsByUser
API operations act on the dashboard entity. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account.
Creates a dashboard from a template. To first create a template, see the CreateTemplate API operation.
A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. You can share QuickSight dashboards. With the right permissions, you can create scheduled email reports from them. The CreateDashboard
, DescribeDashboard
, and ListDashboardsByUser
API operations act on the dashboard entity. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account.
Creates a dataset.
", "CreateDataSource": "Creates a data source.
", "CreateGroup": "Creates an Amazon QuickSight group.
The permissions resource is arn:aws:quicksight:us-east-1:<relevant-aws-account-id>:group/default/<group-name>
.
The response is a group object.
", @@ -35,7 +35,7 @@ "DescribeTemplateAlias": "Describes the template alias for a template.
", "DescribeTemplatePermissions": "Describes read and write permissions on a template.
", "DescribeUser": "Returns information about a user, given the user name.
", - "GetDashboardEmbedUrl": "Generates a server-side embeddable URL and authorization code. For this process to work properly, first configure the dashboards and user permissions. For more information, see Embedding Amazon QuickSight Dashboards in the Amazon QuickSight User Guide or Embedding Amazon QuickSight Dashboards in the Amazon QuickSight API Reference.
Currently, you can use GetDashboardEmbedURL
only from the server, not from the user’s browser.
Generates a URL and authorization code that you can embed in your web server code. Before you use this command, make sure that you have configured the dashboards and permissions.
Currently, you can use GetDashboardEmbedURL
only from the server, not from the user's browser. The following rules apply to the combination of URL and authorization code:
They must be used together.
They can be used one time only.
They are valid for 5 minutes after you run this command.
The resulting user session is valid for 10 hours.
For more information, see Embedding Amazon QuickSight Dashboards in the Amazon QuickSight User Guide or Embedding Amazon QuickSight Dashboards in the Amazon QuickSight API Reference.
", "ListDashboardVersions": "Lists all the versions of the dashboards in the QuickSight subscription.
", "ListDashboards": "Lists dashboards in an AWS account.
", "ListDataSets": "Lists all of the datasets belonging to the current AWS account in an AWS Region.
The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*
.
The name that you want to give to the template alias that you're creating. Don't start the alias name with the $
character. Alias names that start with $
are reserved by QuickSight.
The name for the template alias. If you name a specific alias, you delete the version that the alias points to. You can specify the latest version of the template by providing the keyword $LATEST
in the AliasName
parameter.
The name for the template alias. To delete a specific alias, you delete the version that the alias points to. You can specify the alias name, or specify the latest version of the template by providing the keyword $LATEST
in the AliasName
parameter.
The name for the template alias.
", "DescribeDashboardRequest$AliasName": "The alias name.
", "DescribeTemplateAliasRequest$AliasName": "The name of the template alias that you want to describe. If you name a specific alias, you describe the version that the alias points to. You can specify the latest version of the template by providing the keyword $LATEST
in the AliasName
parameter. The keyword $PUBLISHED
doesn't apply to templates.
The Secure Socket Layer (SSL) properties that apply for the resource.
", "DeleteDataSetResponse$Arn": "The Amazon Resource Name (ARN) of the dataset.
", "DeleteDataSourceResponse$Arn": "The Amazon Resource Name (ARN) of the data source that you deleted.
", - "DeleteTemplateAliasResponse$Arn": "The Amazon Resource Name (ARN) of the resource.
", + "DeleteTemplateAliasResponse$Arn": "The Amazon Resource Name (ARN) of the template you want to delete.
", "DeleteTemplateResponse$Arn": "The Amazon Resource Name (ARN) of the resource.
", "DescribeDashboardPermissionsResponse$DashboardArn": "The Amazon Resource Name (ARN) of the dashboard.
", "DescribeDataSetPermissionsResponse$DataSetArn": "The Amazon Resource Name (ARN) of the dataset.
", @@ -433,7 +433,7 @@ } }, "ColumnTag": { - "base": "A tag for a column in a TagColumnOperation
structure. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.
A tag for a column in a TagColumnOperation structure. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.
", "refs": { "ColumnTagList$member": null } @@ -454,6 +454,12 @@ "refs": { } }, + "CopySourceArn": { + "base": null, + "refs": { + "DataSourceCredentials$CopySourceArn": "The Amazon Resource Name (ARN) of a data source that has the credential pair that you want to use. When CopySourceArn
is not null, the credential pair from the data source in the ARN is used as the credentials for the DataSourceCredentials
structure.
A transform operation that creates calculated columns. Columns created in one such operation form a lexical closure.
", "refs": { @@ -553,7 +559,7 @@ "CredentialPair": { "base": "The combination of user name and password that are used as credentials.
", "refs": { - "DataSourceCredentials$CredentialPair": "Credential pair.
" + "DataSourceCredentials$CredentialPair": "Credential pair. For more information, see CredentialPair.
" } }, "CustomSql": { @@ -602,7 +608,7 @@ "DashboardFilterAttribute": { "base": null, "refs": { - "DashboardSearchFilter$Name": "The name of the value that you want to use as a filter. For example, \"Name\": \"QUICKSIGHT_USER\"
.
The name of the value that you want to use as a filter, for example, \"Name\": \"QUICKSIGHT_USER\"
.
Dashboard publish options.
", "refs": { - "CreateDashboardRequest$DashboardPublishOptions": "Options for publishing the dashboard when you create it:
AvailabilityStatus
for AdHocFilteringOption
- This status can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED
by default.
AvailabilityStatus
for ExportToCSVOption
- This status can be either ENABLED
or DISABLED
. The visual option to export data to .csv format isn't enabled when this is set to DISABLED
. This option is ENABLED
by default.
VisibilityState
for SheetControlsOption
- This visibility state can be either COLLAPSED
or EXPANDED
. The sheet controls pane is collapsed by default when set to true. This option is COLLAPSED
by default.
Options for publishing the dashboard when you create it:
AvailabilityStatus
for AdHocFilteringOption
- This status can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED
by default.
AvailabilityStatus
for ExportToCSVOption
- This status can be either ENABLED
or DISABLED
. The visual option to export data to .csv format isn't enabled when this is set to DISABLED
. This option is ENABLED
by default.
VisibilityState
for SheetControlsOption
- This visibility state can be either COLLAPSED
or EXPANDED
. The sheet controls pane is collapsed by default when set to true. This option is COLLAPSED
by default.
Options for publishing the dashboard when you create it:
AvailabilityStatus
for AdHocFilteringOption
- This status can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED
by default.
AvailabilityStatus
for ExportToCSVOption
- This status can be either ENABLED
or DISABLED
. The visual option to export data to .csv format isn't enabled when this is set to DISABLED
. This option is ENABLED
by default.
VisibilityState
for SheetControlsOption
- This visibility state can be either COLLAPSED
or EXPANDED
. This option is COLLAPSED
by default.
Options for publishing the dashboard when you create it:
AvailabilityStatus
for AdHocFilteringOption
- This status can be either ENABLED
or DISABLED
. When this is set to DISABLED
, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED
by default.
AvailabilityStatus
for ExportToCSVOption
- This status can be either ENABLED
or DISABLED
. The visual option to export data to .csv format isn't enabled when this is set to DISABLED
. This option is ENABLED
by default.
VisibilityState
for SheetControlsOption
- This visibility state can be either COLLAPSED
or EXPANDED
. This option is COLLAPSED
by default.
The filters to apply to the search. Currently, you can search only by user name. For example, \"Filters\": [ { \"Name\": \"QUICKSIGHT_USER\", \"Operator\": \"StringEquals\", \"Value\": \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\" } ]
The filters to apply to the search. Currently, you can search only by user name, for example, \"Filters\": [ { \"Name\": \"QUICKSIGHT_USER\", \"Operator\": \"StringEquals\", \"Value\": \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\" } ]
Dashboard source entity.
", "refs": { - "CreateDashboardRequest$SourceEntity": "The source entity from which the dashboard is created. The source entity accepts the Amazon Resource Name (ARN) of the source template or analysis and also references the replacement datasets for the placeholders set when creating the template. The replacement datasets need to follow the same schema as the datasets for which placeholders were created when creating the template.
If you are creating a dashboard from a source entity in a different AWS account, use the ARN of the source template.
", - "UpdateDashboardRequest$SourceEntity": "The template or analysis from which the dashboard is created. The SouceTemplate
entity accepts the Amazon Resource Name (ARN) of the template and also references to replacement datasets for the placeholders set when creating the template. The replacement datasets need to follow the same schema as the datasets for which placeholders were created when creating the template.
The entity that you are using as a source when you create the dashboard. In SourceEntity
, you specify the type of object you're using as source. You can only create a dashboard from a template, so you use a SourceTemplate
entity. If you need to create a dashboard from an analysis, first convert the analysis to a template by using the CreateTemplate API operation. For SourceTemplate
, specify the Amazon Resource Name (ARN) of the source template. The SourceTemplate
ARN can contain any AWS Account and any QuickSight-supported AWS Region.
Use the DataSetReferences
entity within SourceTemplate
to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.
The entity that you are using as a source when you update the dashboard. In SourceEntity
, you specify the type of object you're using as source. You can only update a dashboard from a template, so you use a SourceTemplate
entity. If you need to update a dashboard from an analysis, first convert the analysis to a template by using the CreateTemplate API operation. For SourceTemplate
, specify the Amazon Resource Name (ARN) of the source template. The SourceTemplate
ARN can contain any AWS Account and any QuickSight-supported AWS Region.
Use the DataSetReferences
entity within SourceTemplate
to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.
Data source credentials.
", + "base": "Data source credentials. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.
", "refs": { "CreateDataSourceRequest$Credentials": "The credentials QuickSight that uses to connect to your underlying source. Currently, only credentials based on user name and password are supported.
", "UpdateDataSourceRequest$Credentials": "The credentials that QuickSight that uses to connect to your underlying source. Currently, only credentials based on user name and password are supported.
" @@ -790,9 +796,17 @@ "refs": { "CreateDataSourceRequest$DataSourceParameters": "The parameters that QuickSight uses to connect to your underlying source.
", "DataSource$DataSourceParameters": "The parameters that Amazon QuickSight uses to connect to your underlying source. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.
", + "DataSourceParametersList$member": null, "UpdateDataSourceRequest$DataSourceParameters": "The parameters that QuickSight uses to connect to your underlying source.
" } }, + "DataSourceParametersList": { + "base": null, + "refs": { + "CredentialPair$AlternateDataSourceParameters": "A set of alternate data source parameters that you want to share for these credentials. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API compares the DataSourceParameters
structure that's in the request with the structures in the AlternateDataSourceParameters
allowlist. If the structures are an exact match, the request is allowed to use the new data source with the existing credentials. If the AlternateDataSourceParameters
list is null, the DataSourceParameters
originally used with these Credentials
is automatically allowed.
A set of alternate data source parameters that you want to share for the credentials stored with this data source. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API compares the DataSourceParameters
structure that's in the request with the structures in the AlternateDataSourceParameters
allowlist. If the structures are an exact match, the request is allowed to use the credentials from this existing data source. If the AlternateDataSourceParameters
list is null, the Credentials
originally used with this DataSourceParameters
are automatically allowed.
An URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes, and the resulting session is valid for 10 hours. The API provides the URL with an auth_code
value that enables a single sign-on session.
A single-use URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes. The API provides the URL with an auth_code
value that enables one (and only one) sign-on to a user session that is valid for 10 hours.
Limit exceeded.
", - "ResourceExistsException$ResourceType": "The AWS request ID for this request.
", - "ResourceNotFoundException$ResourceType": "The AWS request ID for this request.
", + "ResourceExistsException$ResourceType": "The resource type for this request.
", + "ResourceNotFoundException$ResourceType": "The resource type for this request.
", "ResourceUnavailableException$ResourceType": "The resource type for this request.
" } }, @@ -1147,7 +1161,7 @@ "FilterOperator": { "base": null, "refs": { - "DashboardSearchFilter$Operator": "The comparison operator that you want to use as a filter. For example, \"Operator\": \"StringEquals\"
.
The comparison operator that you want to use as a filter, for example, \"Operator\": \"StringEquals\"
.
Parameters.
", "refs": { - "CreateDashboardRequest$Parameters": "A structure that contains the parameters of the dashboard. These are parameter overrides for a dashboard. A dashboard can have any type of parameters, and some parameters might accept multiple values. You can use the dashboard permissions structure described following to override two string parameters that accept multiple values.
", - "UpdateDashboardRequest$Parameters": "A structure that contains the parameters of the dashboard.
" + "CreateDashboardRequest$Parameters": "A structure that contains the parameters of the dashboard. These are parameter overrides for a dashboard. A dashboard can have any type of parameters, and some parameters might accept multiple values.
", + "UpdateDashboardRequest$Parameters": "A structure that contains the parameters of the dashboard. These are parameter overrides for a dashboard. A dashboard can have any type of parameters, and some parameters might accept multiple values.
" } }, "Password": { @@ -2261,7 +2275,7 @@ "CreateIAMPolicyAssignmentResponse$RequestId": "The AWS request ID for this operation.
", "CreateTemplateAliasResponse$RequestId": "The AWS request ID for this operation.
", "CreateTemplateResponse$RequestId": "The AWS request ID for this operation.
", - "DashboardSearchFilter$Value": "The value of the named item, in this case QUICKSIGHT_USER
, that you want to use as a filter. For example, \"Value\": \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\"
.
The value of the named item, in this case QUICKSIGHT_USER
, that you want to use as a filter, for example, \"Value\": \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\"
.
Placeholder.
", "DataSourceErrorInfo$Message": "Error message.
", "DeleteDashboardResponse$RequestId": "The AWS request ID for this operation.
", @@ -2515,8 +2529,8 @@ "TemplateSourceEntity": { "base": "The source entity of the template.
", "refs": { - "CreateTemplateRequest$SourceEntity": "The Amazon Resource Name (ARN) of the source entity from which this template is being created. Currently, you can create a template from an analysis or another template. If the ARN is for an analysis, include its dataset references.
", - "UpdateTemplateRequest$SourceEntity": "The source QuickSight entity from which this template is being updated. You can currently update templates from an Analysis or another template.
" + "CreateTemplateRequest$SourceEntity": "The entity that you are using as a source when you create the template. In SourceEntity
, you specify the type of object you're using as source: SourceTemplate
for a template or SourceAnalysis
for an analysis. Both of these require an Amazon Resource Name (ARN). For SourceTemplate
, specify the ARN of the source template. For SourceAnalysis
, specify the ARN of the source analysis. The SourceTemplate
ARN can contain any AWS Account and any QuickSight-supported AWS Region.
Use the DataSetReferences
entity within SourceTemplate
or SourceAnalysis
to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.
The entity that you are using as a source when you update the template. In SourceEntity
, you specify the type of object you're using as source: SourceTemplate
for a template or SourceAnalysis
for an analysis. Both of these require an Amazon Resource Name (ARN). For SourceTemplate
, specify the ARN of the source template. For SourceAnalysis
, specify the ARN of the source analysis. The SourceTemplate
ARN can contain any AWS Account and any QuickSight-supported AWS Region.
Use the DataSetReferences
entity within SourceTemplate
or SourceAnalysis
to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.
When a job is created, it is assigned a unique ARN.
", "DescribeAutoMLJobResponse$AutoMLJobArn": "Returns the job's ARN.
", "DescribeProcessingJobResponse$AutoMLJobArn": "The ARN of an AutoML job associated with this processing job.
", - "DescribeTrainingJobResponse$AutoMLJobArn": "", - "DescribeTransformJobResponse$AutoMLJobArn": "", + "DescribeTrainingJobResponse$AutoMLJobArn": "The Amazon Resource Name (ARN) of an AutoML job.
", + "DescribeTransformJobResponse$AutoMLJobArn": "The Amazon Resource Name (ARN) of the AutoML transform job.
", "ProcessingJob$AutoMLJobArn": "The Amazon Resource Name (ARN) of the AutoML job associated with this processing job.
", "TrainingJob$AutoMLJobArn": "The Amazon Resource Name (ARN) of the job.
" } @@ -2887,6 +2887,18 @@ "ParameterRanges$IntegerParameterRanges": "The array of IntegerParameterRange objects that specify ranges of integer hyperparameters that a hyperparameter tuning job searches.
" } }, + "InvocationsMaxRetries": { + "base": null, + "refs": { + "ModelClientConfig$InvocationsMaxRetries": "The maximum number of retries when invocation requests are failing.
" + } + }, + "InvocationsTimeoutInSeconds": { + "base": null, + "refs": { + "ModelClientConfig$InvocationsTimeoutInSeconds": "The timeout value in seconds for an invocation request.
" + } + }, "JobReferenceCode": { "base": null, "refs": { @@ -3682,6 +3694,13 @@ "TrainingJob$ModelArtifacts": "Information about the Amazon S3 location that is configured for storing model artifacts.
" } }, + "ModelClientConfig": { + "base": "Configures the timeout and maximum number of retries for processing a transform job invocation.
", + "refs": { + "CreateTransformJobRequest$ModelClientConfig": "Configures the timeout and maximum number of retries for processing a transform job invocation.
", + "DescribeTransformJobResponse$ModelClientConfig": "The timeout and maximum number of retries for processing a transform job invocation.
" + } + }, "ModelName": { "base": null, "refs": { @@ -4303,7 +4322,7 @@ "Operator": { "base": null, "refs": { - "Filter$Operator": "A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:
The value of Name
equals Value
.
The value of Name
doesn't equal Value
.
The value of Name
is greater than Value
. Not supported for text properties.
The value of Name
is greater than or equal to Value
. Not supported for text properties.
The value of Name
is less than Value
. Not supported for text properties.
The value of Name
is less than or equal to Value
. Not supported for text properties.
The value of Name
contains the string Value
. A SearchExpression
can include only one Contains
operator. Only supported for text properties.
The Name
property exists.
The Name
property does not exist.
The value of Name
is one of the comma delimited strings in Value
. Only supported for text properties.
A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:
The value of Name
equals Value
.
The value of Name
doesn't equal Value
.
The Name
property exists.
The Name
property does not exist.
The value of Name
is greater than Value
. Not supported for text properties.
The value of Name
is greater than or equal to Value
. Not supported for text properties.
The value of Name
is less than Value
. Not supported for text properties.
The value of Name
is less than or equal to Value
. Not supported for text properties.
The value of Name
is one of the comma delimited strings in Value
. Only supported for text properties.
The value of Name
contains the string Value
. Only supported for text properties.
A SearchExpression
can include the Contains
operator multiple times when the value of Name
is one of the following:
Experiment.DisplayName
Experiment.ExperimentName
Experiment.Tags
Trial.DisplayName
Trial.TrialName
Trial.Tags
TrialComponent.DisplayName
TrialComponent.TrialComponentName
TrialComponent.Tags
TrialComponent.InputArtifacts
TrialComponent.OutputArtifacts
A SearchExpression
can include only one Contains
operator for all other values of Name
. In these cases, if you include multiple Contains
operators in the SearchExpression
, the result is the following error message: \"'CONTAINS' operator usage limit of 1 exceeded.
\"
A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression
can contain up to twenty elements.
A SearchExpression
contains the following components:
A list of Filter
objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value. A SearchExpression
can include only one Contains
operator.
A list of NestedFilter
objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.
A list of SearchExpression
objects. A search expression object can be nested in a list of search expression objects.
A Boolean operator: And
or Or
.
A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression
can contain up to twenty elements.
A SearchExpression
contains the following components:
A list of Filter
objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value.
A list of NestedFilter
objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.
A list of SearchExpression
objects. A search expression object can be nested in a list of search expression objects.
A Boolean operator: And
or Or
.
A Boolean conditional statement. Resources must satisfy this condition to be included in search results. You must provide at least one subexpression, filter, or nested filter. The maximum number of recursive SubExpressions
, NestedFilters
, and Filters
that can be included in a SearchExpression
object is 50.